VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInline.h@ 106054

Last change on this file since 106054 was 105768, checked in by vboxsync, 3 months ago

VMM/IEM: Eliminated an unnecessary CS.LIM check in IEM_MC_REL_JMP_XXX for FLAT 32-bit mode together with an unnecessary canonical target RIP check for 64-bit mode jumps within the same page (todo 5). bugref:10720

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 182.2 KB
1/* $Id: IEMInline.h 105768 2024-08-21 14:01:05Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Inlined Functions.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInline_h
29#define VMM_INCLUDED_SRC_include_IEMInline_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35
36/**
37 * Makes status code adjustments (pass up from I/O and access handlers)
38 * as well as maintaining statistics.
39 *
40 * @returns Strict VBox status code to pass up.
41 * @param pVCpu The cross context virtual CPU structure of the calling thread.
42 * @param rcStrict The status from executing an instruction.
43 */
44DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
45{
46 if (rcStrict != VINF_SUCCESS)
47 {
48 /* Deal with the cases that should be treated as VINF_SUCCESS first. */
49 if ( rcStrict == VINF_IEM_YIELD_PENDING_FF
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX /** @todo r=bird: Why do we need TWO status codes here? */
51 || rcStrict == VINF_VMX_VMEXIT
52#endif
53#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
54 || rcStrict == VINF_SVM_VMEXIT
55#endif
56 )
57 {
58 rcStrict = pVCpu->iem.s.rcPassUp;
59 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
60 { /* likely */ }
61 else
62 pVCpu->iem.s.cRetPassUpStatus++;
63 }
64 else if (RT_SUCCESS(rcStrict))
65 {
66 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
67 || rcStrict == VINF_IOM_R3_IOPORT_READ
68 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
69 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
70 || rcStrict == VINF_IOM_R3_MMIO_READ
71 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
72 || rcStrict == VINF_IOM_R3_MMIO_WRITE
73 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
74 || rcStrict == VINF_CPUM_R3_MSR_READ
75 || rcStrict == VINF_CPUM_R3_MSR_WRITE
76 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
77 || rcStrict == VINF_EM_RAW_TO_R3
78 || rcStrict == VINF_EM_TRIPLE_FAULT
79 || rcStrict == VINF_EM_EMULATE_SPLIT_LOCK
80 || rcStrict == VINF_GIM_R3_HYPERCALL
81 /* raw-mode / virt handlers only: */
82 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
83 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
84 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
85 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
86 || rcStrict == VINF_SELM_SYNC_GDT
87 || rcStrict == VINF_CSAM_PENDING_ACTION
88 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
89 /* nested hw.virt codes: */
90 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
91 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
92 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
93/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
94 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
95 if (rcPassUp == VINF_SUCCESS)
96 pVCpu->iem.s.cRetInfStatuses++;
97 else if ( rcPassUp < VINF_EM_FIRST
98 || rcPassUp > VINF_EM_LAST
99 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
100 {
101 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
102 pVCpu->iem.s.cRetPassUpStatus++;
103 rcStrict = rcPassUp;
104 }
105 else
106 {
107 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
108 pVCpu->iem.s.cRetInfStatuses++;
109 }
110 }
111 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
112 pVCpu->iem.s.cRetAspectNotImplemented++;
113 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
114 pVCpu->iem.s.cRetInstrNotImplemented++;
115 else
116 pVCpu->iem.s.cRetErrStatuses++;
117 }
118 else
119 {
120 rcStrict = pVCpu->iem.s.rcPassUp;
121 if (rcStrict != VINF_SUCCESS)
122 pVCpu->iem.s.cRetPassUpStatus++;
123 }
124
125 /* Just clear it here as well. */
126 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
127
128 return rcStrict;
129}
130
131
132/**
133 * Sets the pass up status.
134 *
135 * @returns VINF_SUCCESS.
136 * @param pVCpu The cross context virtual CPU structure of the
137 * calling thread.
138 * @param rcPassUp The pass up status. Must be informational.
139 * VINF_SUCCESS is not allowed.
140 */
141DECLINLINE(int) iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp) RT_NOEXCEPT
142{
143 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
144
145 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
146 if (rcOldPassUp == VINF_SUCCESS)
147 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
148 /* If both are EM scheduling codes, use EM priority rules. */
149 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
150 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
151 {
152 if (rcPassUp < rcOldPassUp)
153 {
154 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
155 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
156 }
157 else
158 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
159 }
160 /* Override EM scheduling with specific status code. */
161 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
162 {
163 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
164 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
165 }
166 /* Don't override specific status code, first come first served. */
167 else
168 LogEx(LOG_GROUP_IEM,("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
169 return VINF_SUCCESS;
170}
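
To make the priority rules above concrete, here is a minimal illustrative call sequence (not taken from this file); it only assumes that VINF_EM_RAW_TO_R3 lies in the VINF_EM_FIRST..VINF_EM_LAST range while VINF_IOM_R3_MMIO_READ does not:

/* Illustrative only: how two competing pass-up codes are resolved. */
iemSetPassUpStatus(pVCpu, VINF_EM_RAW_TO_R3);      /* EM scheduling code recorded first. */
iemSetPassUpStatus(pVCpu, VINF_IOM_R3_MMIO_READ);  /* Specific code overrides the EM scheduling code. */
/* pVCpu->iem.s.rcPassUp is now VINF_IOM_R3_MMIO_READ; any later specific code
   would be ignored (first come, first served). */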
171
172
173/**
174 * Calculates the IEM_F_X86_AC flag.
175 *
176 * @returns IEM_F_X86_AC or zero
177 * @param pVCpu The cross context virtual CPU structure of the
178 * calling thread.
179 */
180DECL_FORCE_INLINE(uint32_t) iemCalcExecAcFlag(PVMCPUCC pVCpu) RT_NOEXCEPT
181{
182 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_RFLAGS);
183 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
184
185 if ( !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
186 || (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_AM | X86_CR0_PE)) != (X86_CR0_AM | X86_CR0_PE)
187 || ( !pVCpu->cpum.GstCtx.eflags.Bits.u1VM
188 && pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl != 3))
189 return 0;
190 return IEM_F_X86_AC;
191}
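
Put differently, the flag is only set when every enabling condition lines up; a compact restatement of the test above (illustrative, not additional code):

/* IEM_F_X86_AC is returned only when:
 *   EFLAGS.AC = 1, CR0.AM = 1 and CR0.PE = 1, and additionally
 *   either EFLAGS.VM = 1 (V8086 mode) or SS.DPL = 3 (ring-3 code).
 */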
192
193
194/**
195 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag.
196 *
197 * Checks if CS, SS, DS and ES are all wide open flat 32-bit segments. This will
198 * reject expand down data segments and conforming code segments.
199 *
200 * ASSUMES that the CPU is in 32-bit mode.
201 *
202 * @note Will return zero if any of the segment register state is marked
203 * external; this must be factored into assertions checking fExec
204 * consistency.
205 *
206 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
207 * @param pVCpu The cross context virtual CPU structure of the
208 * calling thread.
209 * @sa iemCalc32BitFlatIndicatorEsDs
210 */
211DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicator(PVMCPUCC pVCpu) RT_NOEXCEPT
212{
213 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
214 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
215 | pVCpu->cpum.GstCtx.cs.Attr.u
216 | pVCpu->cpum.GstCtx.ss.Attr.u
217 | pVCpu->cpum.GstCtx.ds.Attr.u)
218 & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
219 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
220 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
221 | (pVCpu->cpum.GstCtx.cs.u32Limit + 1)
222 | (pVCpu->cpum.GstCtx.ss.u32Limit + 1)
223 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
224 == 0
225 && ( pVCpu->cpum.GstCtx.es.u64Base
226 | pVCpu->cpum.GstCtx.cs.u64Base
227 | pVCpu->cpum.GstCtx.ss.u64Base
228 | pVCpu->cpum.GstCtx.ds.u64Base)
229 == 0
230 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
231 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
232}
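
The limit and base tests above lean on unsigned 32-bit wrap-around and on OR being zero only when every operand is zero: u32Limit + 1 wraps to 0 exactly when the limit is UINT32_MAX, so ORing the four incremented limits (or the four bases) yields 0 only if every segment spans the full 0..4GiB range. A tiny stand-alone sketch of the same idea, with invented variable names:

/* Illustrative only: the wrap-around/OR trick used for the limit check. */
uint32_t const aLimits[4] = { UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
uint32_t       uOred      = 0;
for (unsigned i = 0; i < 4; i++)
    uOred |= aLimits[i] + 1;             /* UINT32_MAX + 1 wraps to 0. */
bool const fAllLimitsFlat = uOred == 0;  /* True only if every limit was UINT32_MAX. */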
233
234
235/**
236 * Calculates the IEM_F_MODE_X86_32BIT_FLAT flag, ASSUMING the CS and SS are
237 * flat already.
238 *
239 * This is used by sysenter.
240 *
241 * @note Will return zero if any of the segment register state is marked
242 * external; this must be factored into assertions checking fExec
243 * consistency.
244 *
245 * @returns IEM_F_MODE_X86_32BIT_FLAT or zero.
246 * @param pVCpu The cross context virtual CPU structure of the
247 * calling thread.
248 * @sa iemCalc32BitFlatIndicator
249 */
250DECL_FORCE_INLINE(uint32_t) iemCalc32BitFlatIndicatorEsDs(PVMCPUCC pVCpu) RT_NOEXCEPT
251{
252 AssertCompile(X86_SEL_TYPE_DOWN == X86_SEL_TYPE_CONF);
253 return ( ( pVCpu->cpum.GstCtx.es.Attr.u
254 | pVCpu->cpum.GstCtx.ds.Attr.u)
255 & (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86DESCATTR_UNUSABLE))
256 == (X86_SEL_TYPE_ACCESSED | X86DESCATTR_G | X86DESCATTR_D | X86DESCATTR_P)
257 && ( (pVCpu->cpum.GstCtx.es.u32Limit + 1)
258 | (pVCpu->cpum.GstCtx.ds.u32Limit + 1))
259 == 0
260 && ( pVCpu->cpum.GstCtx.es.u64Base
261 | pVCpu->cpum.GstCtx.ds.u64Base)
262 == 0
263 && !(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ES | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS))
264 ? IEM_F_MODE_X86_32BIT_FLAT : 0;
265}
266
267
268/**
269 * Calculates the IEM_F_MODE_XXX, CPL and AC flags.
270 *
271 * @returns IEM_F_MODE_XXX, IEM_F_X86_CPL_MASK and IEM_F_X86_AC.
272 * @param pVCpu The cross context virtual CPU structure of the
273 * calling thread.
274 */
275DECL_FORCE_INLINE(uint32_t) iemCalcExecModeAndCplFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
276{
277 /*
278 * We duplicate code from CPUMGetGuestCPL and CPUMIsGuestIn64BitCodeEx
279 * here to try to get this done as efficiently as possible.
280 */
281 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_EFER | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_CS);
282
283 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
284 {
285 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
286 {
287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
288 uint32_t fExec = ((uint32_t)pVCpu->cpum.GstCtx.ss.Attr.n.u2Dpl << IEM_F_X86_CPL_SHIFT);
289 if ( !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
290 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
291 || fExec != (3U << IEM_F_X86_CPL_SHIFT))
292 { /* likely */ }
293 else
294 fExec |= IEM_F_X86_AC;
295
296 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig)
297 {
298 Assert(!pVCpu->cpum.GstCtx.cs.Attr.n.u1Long || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA));
299 fExec |= IEM_F_MODE_X86_32BIT_PROT | iemCalc32BitFlatIndicator(pVCpu);
300 }
301 else if ( pVCpu->cpum.GstCtx.cs.Attr.n.u1Long
302 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA))
303 fExec |= IEM_F_MODE_X86_64BIT;
304 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
305 fExec |= IEM_F_MODE_X86_16BIT_PROT;
306 else
307 fExec |= IEM_F_MODE_X86_16BIT_PROT_PRE_386;
308 return fExec;
309 }
310 if ( !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
311 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM))
312 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT);
313 return IEM_F_MODE_X86_16BIT_PROT_V86 | (UINT32_C(3) << IEM_F_X86_CPL_SHIFT) | IEM_F_X86_AC;
314 }
315
316 /* Real mode is zero; CPL set to 3 for VT-x real-mode emulation. */
317 if (RT_LIKELY(!pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
318 {
319 if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
320 return IEM_F_MODE_X86_16BIT;
321 return IEM_F_MODE_X86_16BIT_PRE_386;
322 }
323
324 /* 32-bit unreal mode. */
325 return IEM_F_MODE_X86_32BIT | iemCalc32BitFlatIndicator(pVCpu);
326}
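
As a quick orientation, the mode part of the result above breaks down roughly as follows (sketch only; the CPL and AC bits are OR'ed in as shown in the code):

/* Rough mode summary (illustrative):
 *   CR0.PE=0, CS.D=0             -> IEM_F_MODE_X86_16BIT (or _PRE_386)
 *   CR0.PE=0, CS.D=1             -> IEM_F_MODE_X86_32BIT + flat indicator ("unreal" mode)
 *   CR0.PE=1, EFLAGS.VM=1        -> IEM_F_MODE_X86_16BIT_PROT_V86, CPL=3
 *   CR0.PE=1, CS.L=1, EFER.LMA=1 -> IEM_F_MODE_X86_64BIT
 *   CR0.PE=1, CS.D=1             -> IEM_F_MODE_X86_32BIT_PROT + flat indicator
 *   CR0.PE=1, CS.D=0, CS.L=0     -> IEM_F_MODE_X86_16BIT_PROT (or _PRE_386)
 */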
327
328
329/**
330 * Calculates the AMD-V and VT-x related context flags.
331 *
332 * @returns 0 or a combination of IEM_F_X86_CTX_IN_GUEST, IEM_F_X86_CTX_SVM and
333 * IEM_F_X86_CTX_VMX.
334 * @param pVCpu The cross context virtual CPU structure of the
335 * calling thread.
336 */
337DECL_FORCE_INLINE(uint32_t) iemCalcExecHwVirtFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
338{
339 /*
340 * This duplicates code from CPUMIsGuestVmxEnabled, CPUMIsGuestSvmEnabled
341 * and CPUMIsGuestInNestedHwvirtMode to some extent.
342 */
343 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
344
345 AssertCompile(X86_CR4_VMXE != MSR_K6_EFER_SVME);
346 uint64_t const fTmp = (pVCpu->cpum.GstCtx.cr4 & X86_CR4_VMXE)
347 | (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_SVME);
348 if (RT_LIKELY(!fTmp))
349 return 0; /* likely */
350
351 if (fTmp & X86_CR4_VMXE)
352 {
353 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_VMX);
354 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxNonRootMode)
355 return IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST;
356 return IEM_F_X86_CTX_VMX;
357 }
358
359 Assert(pVCpu->cpum.GstCtx.hwvirt.enmHwvirt == CPUMHWVIRT_SVM);
360 if (pVCpu->cpum.GstCtx.hwvirt.svm.Vmcb.ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN)
361 return IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST;
362 return IEM_F_X86_CTX_SVM;
363}
364
365#ifdef VBOX_INCLUDED_vmm_dbgf_h /* VM::dbgf.ro.cEnabledHwBreakpoints is only accessible if VBox/vmm/dbgf.h is included. */
366
367/**
368 * Calculates IEM_F_BRK_PENDING_XXX (IEM_F_PENDING_BRK_MASK) flags.
369 *
370 * @returns IEM_F_BRK_PENDING_XXX or zero.
371 * @param pVCpu The cross context virtual CPU structure of the
372 * calling thread.
373 */
374DECL_FORCE_INLINE(uint32_t) iemCalcExecDbgFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
375{
376 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
377
378 if (RT_LIKELY( !(pVCpu->cpum.GstCtx.dr[7] & X86_DR7_ENABLED_MASK)
379 && pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledHwBreakpoints == 0))
380 return 0;
381 return iemCalcExecDbgFlagsSlow(pVCpu);
382}
383
384/**
385 * Calculates the IEM_F_XXX flags.
386 *
387 * @returns IEM_F_XXX combination matching the current CPU state.
388 * @param pVCpu The cross context virtual CPU structure of the
389 * calling thread.
390 */
391DECL_FORCE_INLINE(uint32_t) iemCalcExecFlags(PVMCPUCC pVCpu) RT_NOEXCEPT
392{
393 return iemCalcExecModeAndCplFlags(pVCpu)
394 | iemCalcExecHwVirtFlags(pVCpu)
395 /* SMM is not yet implemented */
396 | iemCalcExecDbgFlags(pVCpu)
397 ;
398}
399
400
401/**
402 * Re-calculates the MODE and CPL parts of IEMCPU::fExec.
403 *
404 * @param pVCpu The cross context virtual CPU structure of the
405 * calling thread.
406 */
407DECL_FORCE_INLINE(void) iemRecalcExecModeAndCplAndAcFlags(PVMCPUCC pVCpu)
408{
409 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~(IEM_F_MODE_MASK | IEM_F_X86_CPL_MASK | IEM_F_X86_AC))
410 | iemCalcExecModeAndCplFlags(pVCpu);
411}
412
413
414/**
415 * Re-calculates the IEM_F_PENDING_BRK_MASK part of IEMCPU::fExec.
416 *
417 * @param pVCpu The cross context virtual CPU structure of the
418 * calling thread.
419 */
420DECL_FORCE_INLINE(void) iemRecalcExecDbgFlags(PVMCPUCC pVCpu)
421{
422 pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
423 | iemCalcExecDbgFlags(pVCpu);
424}
425
426#endif /* VBOX_INCLUDED_vmm_dbgf_h */
427
428
429#ifndef IEM_WITH_OPAQUE_DECODER_STATE
430
431# if defined(VBOX_INCLUDED_vmm_dbgf_h) || defined(DOXYGEN_RUNNING) /* dbgf.ro.cEnabledHwBreakpoints */
432
433/**
434 * Initializes the execution state.
435 *
436 * @param pVCpu The cross context virtual CPU structure of the
437 * calling thread.
438 * @param fExecOpts Optional execution flags:
439 * - IEM_F_BYPASS_HANDLERS
440 * - IEM_F_X86_DISREGARD_LOCK
441 *
442 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
443 * side-effects in strict builds.
444 */
445DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, uint32_t fExecOpts) RT_NOEXCEPT
446{
447 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
448 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
449 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
452 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
453 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
454 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
455 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
456 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
457
458 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
459 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | fExecOpts;
460 pVCpu->iem.s.cActiveMappings = 0;
461 pVCpu->iem.s.iNextMapping = 0;
462
463# ifdef VBOX_STRICT
464 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
465 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
466 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
467 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
468 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
469 pVCpu->iem.s.uRexReg = 127;
470 pVCpu->iem.s.uRexB = 127;
471 pVCpu->iem.s.offModRm = 127;
472 pVCpu->iem.s.uRexIndex = 127;
473 pVCpu->iem.s.iEffSeg = 127;
474 pVCpu->iem.s.idxPrefix = 127;
475 pVCpu->iem.s.uVex3rdReg = 127;
476 pVCpu->iem.s.uVexLength = 127;
477 pVCpu->iem.s.fEvexStuff = 127;
478 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
479# ifdef IEM_WITH_CODE_TLB
480 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
481 pVCpu->iem.s.pbInstrBuf = NULL;
482 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
483 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
484 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
485 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
486# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
487 pVCpu->iem.s.offOpcode = 127;
488# endif
489# else
490 pVCpu->iem.s.offOpcode = 127;
491 pVCpu->iem.s.cbOpcode = 127;
492# endif
493# endif /* VBOX_STRICT */
494}
495
496
497# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
498/**
499 * Performs a minimal reinitialization of the execution state.
500 *
501 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
502 * 'world-switch' type operations on the CPU. Currently only nested
503 * hardware-virtualization uses it.
504 *
505 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
506 * @param cbInstr The instruction length (for flushing).
507 */
508DECLINLINE(void) iemReInitExec(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
509{
510 pVCpu->iem.s.fExec = iemCalcExecFlags(pVCpu) | (pVCpu->iem.s.fExec & IEM_F_USER_OPTS);
511 iemOpcodeFlushHeavy(pVCpu, cbInstr);
512}
513# endif
514
515# endif /* VBOX_INCLUDED_vmm_dbgf_h || DOXYGEN_RUNNING */
516
517/**
518 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
519 *
520 * @param pVCpu The cross context virtual CPU structure of the
521 * calling thread.
522 */
523DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu) RT_NOEXCEPT
524{
525 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
526# ifdef VBOX_STRICT
527# ifdef IEM_WITH_CODE_TLB
528 NOREF(pVCpu);
529# else
530 pVCpu->iem.s.cbOpcode = 0;
531# endif
532# else
533 NOREF(pVCpu);
534# endif
535}
536
537
538/**
539 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
540 *
541 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
542 *
543 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
545 * @param rcStrict The status code to fiddle.
546 */
547DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
548{
549 iemUninitExec(pVCpu);
550 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
551}
552
553
554/**
555 * Macro used by the IEMExec* methods to check the given instruction length.
556 *
557 * Will return on failure!
558 *
559 * @param a_cbInstr The given instruction length.
560 * @param a_cbMin The minimum length.
561 */
562# define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
563 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
564 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
565
566
567# ifndef IEM_WITH_SETJMP
568
569/**
570 * Fetches the first opcode byte.
571 *
572 * @returns Strict VBox status code.
573 * @param pVCpu The cross context virtual CPU structure of the
574 * calling thread.
575 * @param pu8 Where to return the opcode byte.
576 */
577DECLINLINE(VBOXSTRICTRC) iemOpcodeGetFirstU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
578{
579 /*
580 * Check for hardware instruction breakpoints.
581 * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.
582 */
583 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
584 { /* likely */ }
585 else
586 {
587 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
588 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
589 !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
590 || IEM_IS_GUEST_CPU_AMD(pVCpu));
591 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
592 { /* likely */ }
593 else
594 {
595 *pu8 = 0xff; /* shut up gcc. sigh */
596 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
597 return iemRaiseDebugException(pVCpu);
598 return rcStrict;
599 }
600 }
601
602 /*
603 * Fetch the first opcode byte.
604 */
605 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
606 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
607 {
608 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
609 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
610 return VINF_SUCCESS;
611 }
612 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
613}
614
615# else /* IEM_WITH_SETJMP */
616
617/**
618 * Fetches the first opcode byte, longjmp on error.
619 *
620 * @returns The opcode byte.
621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
622 */
623DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
624{
625 /*
626 * Check for hardware instruction breakpoints.
627 * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.
628 */
629 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
630 { /* likely */ }
631 else
632 {
633 VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
634 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
635 !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
636 || IEM_IS_GUEST_CPU_AMD(pVCpu));
637 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
638 { /* likely */ }
639 else
640 {
641 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
642 rcStrict = iemRaiseDebugException(pVCpu);
643 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
644 }
645 }
646
647 /*
648 * Fetch the first opcode byte.
649 */
650# ifdef IEM_WITH_CODE_TLB
651 uint8_t bRet;
652 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
653 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
654 if (RT_LIKELY( pbBuf != NULL
655 && offBuf < pVCpu->iem.s.cbInstrBuf))
656 {
657 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
658 bRet = pbBuf[offBuf];
659 }
660 else
661 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
662# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
663 Assert(pVCpu->iem.s.offOpcode == 0);
664 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
665# endif
666 return bRet;
667
668# else /* !IEM_WITH_CODE_TLB */
669 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
670 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
671 {
672 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
673 return pVCpu->iem.s.abOpcode[offOpcode];
674 }
675 return iemOpcodeGetNextU8SlowJmp(pVCpu);
676# endif
677}
678
679# endif /* IEM_WITH_SETJMP */
680
681/**
682 * Fetches the first opcode byte, returns/throws automatically on failure.
683 *
684 * @param a_pu8 Where to return the opcode byte.
685 * @remark Implicitly references pVCpu.
686 */
687# ifndef IEM_WITH_SETJMP
688# define IEM_OPCODE_GET_FIRST_U8(a_pu8) \
689 do \
690 { \
691 VBOXSTRICTRC rcStrict2 = iemOpcodeGetFirstU8(pVCpu, (a_pu8)); \
692 if (rcStrict2 == VINF_SUCCESS) \
693 { /* likely */ } \
694 else \
695 return rcStrict2; \
696 } while (0)
697# else
698# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))
699# endif /* IEM_WITH_SETJMP */
700
701
702# ifndef IEM_WITH_SETJMP
703
704/**
705 * Fetches the next opcode byte.
706 *
707 * @returns Strict VBox status code.
708 * @param pVCpu The cross context virtual CPU structure of the
709 * calling thread.
710 * @param pu8 Where to return the opcode byte.
711 */
712DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8) RT_NOEXCEPT
713{
714 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
715 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
716 {
717 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
718 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
719 return VINF_SUCCESS;
720 }
721 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
722}
723
724# else /* IEM_WITH_SETJMP */
725
726/**
727 * Fetches the next opcode byte, longjmp on error.
728 *
729 * @returns The opcode byte.
730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
731 */
732DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
733{
734# ifdef IEM_WITH_CODE_TLB
735 uint8_t bRet;
736 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
737 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
738 if (RT_LIKELY( pbBuf != NULL
739 && offBuf < pVCpu->iem.s.cbInstrBuf))
740 {
741 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
742 bRet = pbBuf[offBuf];
743 }
744 else
745 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
746# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
747 Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
748 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
749# endif
750 return bRet;
751
752# else /* !IEM_WITH_CODE_TLB */
753 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
754 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
755 {
756 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
757 return pVCpu->iem.s.abOpcode[offOpcode];
758 }
759 return iemOpcodeGetNextU8SlowJmp(pVCpu);
760# endif
761}
762
763# endif /* IEM_WITH_SETJMP */
764
765/**
766 * Fetches the next opcode byte, returns automatically on failure.
767 *
768 * @param a_pu8 Where to return the opcode byte.
769 * @remark Implicitly references pVCpu.
770 */
771# ifndef IEM_WITH_SETJMP
772# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
773 do \
774 { \
775 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
776 if (rcStrict2 == VINF_SUCCESS) \
777 { /* likely */ } \
778 else \
779 return rcStrict2; \
780 } while (0)
781# else
782# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
783# endif /* IEM_WITH_SETJMP */
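
In the non-setjmp build these macros expand to an early return, so they can only be used inside a decoder function that itself returns VBOXSTRICTRC; in the setjmp build they simply assign and longjmp on failure. A hypothetical fragment showing the typical pattern (the surrounding function name is invented for illustration):

/* Hypothetical decoder fragment, non-setjmp build assumed: */
static VBOXSTRICTRC iemOp_ExampleDecode(PVMCPUCC pVCpu) /* invented name */
{
    uint8_t bOpcode, bModRm;
    IEM_OPCODE_GET_FIRST_U8(&bOpcode);  /* early-returns the fetch status on failure */
    IEM_OPCODE_GET_NEXT_U8(&bModRm);    /* subsequent bytes use the NEXT variant */
    /* ... decode and dispatch ... */
    return VINF_SUCCESS;
}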
784
785
786# ifndef IEM_WITH_SETJMP
787/**
788 * Fetches the next signed byte from the opcode stream.
789 *
790 * @returns Strict VBox status code.
791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
792 * @param pi8 Where to return the signed byte.
793 */
794DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8) RT_NOEXCEPT
795{
796 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
797}
798# endif /* !IEM_WITH_SETJMP */
799
800
801/**
802 * Fetches the next signed byte from the opcode stream, returning automatically
803 * on failure.
804 *
805 * @param a_pi8 Where to return the signed byte.
806 * @remark Implicitly references pVCpu.
807 */
808# ifndef IEM_WITH_SETJMP
809# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
810 do \
811 { \
812 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
813 if (rcStrict2 != VINF_SUCCESS) \
814 return rcStrict2; \
815 } while (0)
816# else /* IEM_WITH_SETJMP */
817# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
818
819# endif /* IEM_WITH_SETJMP */
820
821
822# ifndef IEM_WITH_SETJMP
823/**
824 * Fetches the next signed byte from the opcode stream, extending it to
825 * unsigned 16-bit.
826 *
827 * @returns Strict VBox status code.
828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
829 * @param pu16 Where to return the unsigned word.
830 */
831DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
832{
833 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
834 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
835 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
836
837 *pu16 = (uint16_t)(int16_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
838 pVCpu->iem.s.offOpcode = offOpcode + 1;
839 return VINF_SUCCESS;
840}
841# endif /* !IEM_WITH_SETJMP */
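
The (uint16_t)(int16_t)(int8_t) chain above is how the immediate byte gets sign-extended: the first cast reinterprets the byte as signed, widening to int16_t then replicates the sign bit, and the final cast only fixes the storage type. A tiny illustration with an arbitrary value:

/* Illustrative only: 0xF0 is -16 as int8_t and sign-extends to 0xFFF0. */
uint8_t  const bImm   = 0xF0;
uint16_t const uImm16 = (uint16_t)(int16_t)(int8_t)bImm;  /* 0xFFF0 */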
842
843/**
844 * Fetches the next signed byte from the opcode stream and sign-extends it to
845 * a word, returning automatically on failure.
846 *
847 * @param a_pu16 Where to return the word.
848 * @remark Implicitly references pVCpu.
849 */
850# ifndef IEM_WITH_SETJMP
851# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
852 do \
853 { \
854 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
855 if (rcStrict2 != VINF_SUCCESS) \
856 return rcStrict2; \
857 } while (0)
858# else
859# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
860# endif
861
862# ifndef IEM_WITH_SETJMP
863/**
864 * Fetches the next signed byte from the opcode stream, extending it to
865 * unsigned 32-bit.
866 *
867 * @returns Strict VBox status code.
868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
869 * @param pu32 Where to return the unsigned dword.
870 */
871DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
872{
873 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
874 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
875 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
876
877 *pu32 = (uint32_t)(int32_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
878 pVCpu->iem.s.offOpcode = offOpcode + 1;
879 return VINF_SUCCESS;
880}
881# endif /* !IEM_WITH_SETJMP */
882
883/**
884 * Fetches the next signed byte from the opcode stream and sign-extends it to
885 * a double word, returning automatically on failure.
886 *
887 * @param a_pu32 Where to return the double word.
888 * @remark Implicitly references pVCpu.
889 */
890# ifndef IEM_WITH_SETJMP
891# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
892 do \
893 { \
894 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
895 if (rcStrict2 != VINF_SUCCESS) \
896 return rcStrict2; \
897 } while (0)
898# else
899# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
900# endif
901
902
903# ifndef IEM_WITH_SETJMP
904/**
905 * Fetches the next signed byte from the opcode stream, extending it to
906 * unsigned 64-bit.
907 *
908 * @returns Strict VBox status code.
909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
910 * @param pu64 Where to return the unsigned qword.
911 */
912DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
913{
914 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
915 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
916 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
917
918 *pu64 = (uint64_t)(int64_t)(int8_t)pVCpu->iem.s.abOpcode[offOpcode];
919 pVCpu->iem.s.offOpcode = offOpcode + 1;
920 return VINF_SUCCESS;
921}
922# endif /* !IEM_WITH_SETJMP */
923
924/**
925 * Fetches the next signed byte from the opcode stream and sign-extends it to
926 * a quad word, returning automatically on failure.
927 *
928 * @param a_pu64 Where to return the quad word.
929 * @remark Implicitly references pVCpu.
930 */
931# ifndef IEM_WITH_SETJMP
932# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
933 do \
934 { \
935 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
936 if (rcStrict2 != VINF_SUCCESS) \
937 return rcStrict2; \
938 } while (0)
939# else
940# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
941# endif
942
943
944# ifndef IEM_WITH_SETJMP
945
946/**
947 * Fetches the next opcode word.
948 *
949 * @returns Strict VBox status code.
950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
951 * @param pu16 Where to return the opcode word.
952 */
953DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
954{
955 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
956 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
957 {
958 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
959# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
960 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
961# else
962 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
963# endif
964 return VINF_SUCCESS;
965 }
966 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
967}
968
969# else /* IEM_WITH_SETJMP */
970
971/**
972 * Fetches the next opcode word, longjmp on error.
973 *
974 * @returns The opcode word.
975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
976 */
977DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
978{
979# ifdef IEM_WITH_CODE_TLB
980 uint16_t u16Ret;
981 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
982 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
983 if (RT_LIKELY( pbBuf != NULL
984 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
985 {
986 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
987# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
988 u16Ret = *(uint16_t const *)&pbBuf[offBuf];
989# else
990 u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
991# endif
992 }
993 else
994 u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);
995
996# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
997 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
998 Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
999# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1000 *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
1001# else
1002 pVCpu->iem.s.abOpcode[offOpcode] = RT_LO_U8(u16Ret);
1003 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
1004# endif
1005 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
1006# endif
1007
1008 return u16Ret;
1009
1010# else /* !IEM_WITH_CODE_TLB */
1011 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1012 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
1013 {
1014 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
1015# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1016 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1017# else
1018 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1019# endif
1020 }
1021 return iemOpcodeGetNextU16SlowJmp(pVCpu);
1022# endif /* !IEM_WITH_CODE_TLB */
1023}
1024
1025# endif /* IEM_WITH_SETJMP */
1026
1027/**
1028 * Fetches the next opcode word, returns automatically on failure.
1029 *
1030 * @param a_pu16 Where to return the opcode word.
1031 * @remark Implicitly references pVCpu.
1032 */
1033# ifndef IEM_WITH_SETJMP
1034# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
1035 do \
1036 { \
1037 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
1038 if (rcStrict2 != VINF_SUCCESS) \
1039 return rcStrict2; \
1040 } while (0)
1041# else
1042# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
1043# endif
1044
1045# ifndef IEM_WITH_SETJMP
1046/**
1047 * Fetches the next opcode word, zero extending it to a double word.
1048 *
1049 * @returns Strict VBox status code.
1050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1051 * @param pu32 Where to return the opcode double word.
1052 */
1053DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1054{
1055 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1056 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1057 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
1058
1059 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1060 pVCpu->iem.s.offOpcode = offOpcode + 2;
1061 return VINF_SUCCESS;
1062}
1063# endif /* !IEM_WITH_SETJMP */
1064
1065/**
1066 * Fetches the next opcode word and zero extends it to a double word, returns
1067 * automatically on failure.
1068 *
1069 * @param a_pu32 Where to return the opcode double word.
1070 * @remark Implicitly references pVCpu.
1071 */
1072# ifndef IEM_WITH_SETJMP
1073# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
1074 do \
1075 { \
1076 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
1077 if (rcStrict2 != VINF_SUCCESS) \
1078 return rcStrict2; \
1079 } while (0)
1080# else
1081# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
1082# endif
1083
1084# ifndef IEM_WITH_SETJMP
1085/**
1086 * Fetches the next opcode word, zero extending it to a quad word.
1087 *
1088 * @returns Strict VBox status code.
1089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1090 * @param pu64 Where to return the opcode quad word.
1091 */
1092DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1093{
1094 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1095 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
1096 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
1097
1098 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1099 pVCpu->iem.s.offOpcode = offOpcode + 2;
1100 return VINF_SUCCESS;
1101}
1102# endif /* !IEM_WITH_SETJMP */
1103
1104/**
1105 * Fetches the next opcode word and zero extends it to a quad word, returns
1106 * automatically on failure.
1107 *
1108 * @param a_pu64 Where to return the opcode quad word.
1109 * @remark Implicitly references pVCpu.
1110 */
1111# ifndef IEM_WITH_SETJMP
1112# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
1113 do \
1114 { \
1115 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
1116 if (rcStrict2 != VINF_SUCCESS) \
1117 return rcStrict2; \
1118 } while (0)
1119# else
1120# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
1121# endif
1122
1123
1124# ifndef IEM_WITH_SETJMP
1125/**
1126 * Fetches the next signed word from the opcode stream.
1127 *
1128 * @returns Strict VBox status code.
1129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1130 * @param pi16 Where to return the signed word.
1131 */
1132DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16) RT_NOEXCEPT
1133{
1134 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
1135}
1136# endif /* !IEM_WITH_SETJMP */
1137
1138
1139/**
1140 * Fetches the next signed word from the opcode stream, returning automatically
1141 * on failure.
1142 *
1143 * @param a_pi16 Where to return the signed word.
1144 * @remark Implicitly references pVCpu.
1145 */
1146# ifndef IEM_WITH_SETJMP
1147# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
1148 do \
1149 { \
1150 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
1151 if (rcStrict2 != VINF_SUCCESS) \
1152 return rcStrict2; \
1153 } while (0)
1154# else
1155# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
1156# endif
1157
1158# ifndef IEM_WITH_SETJMP
1159
1160/**
1161 * Fetches the next opcode dword.
1162 *
1163 * @returns Strict VBox status code.
1164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1165 * @param pu32 Where to return the opcode double word.
1166 */
1167DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1168{
1169 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1170 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1171 {
1172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1173# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1174 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1175# else
1176 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1177 pVCpu->iem.s.abOpcode[offOpcode + 1],
1178 pVCpu->iem.s.abOpcode[offOpcode + 2],
1179 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1180# endif
1181 return VINF_SUCCESS;
1182 }
1183 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
1184}
1185
1186# else /* IEM_WITH_SETJMP */
1187
1188/**
1189 * Fetches the next opcode dword, longjmp on error.
1190 *
1191 * @returns The opcode dword.
1192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1193 */
1194DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1195{
1196# ifdef IEM_WITH_CODE_TLB
1197 uint32_t u32Ret;
1198 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1199 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1200 if (RT_LIKELY( pbBuf != NULL
1201 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
1202 {
1203 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
1204# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1205 u32Ret = *(uint32_t const *)&pbBuf[offBuf];
1206# else
1207 u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
1208 pbBuf[offBuf + 1],
1209 pbBuf[offBuf + 2],
1210 pbBuf[offBuf + 3]);
1211# endif
1212 }
1213 else
1214 u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);
1215
1216# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1217 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1218 Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
1219# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1220 *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
1221# else
1222 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u32Ret);
1223 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
1224 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
1225 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
1226# endif
1227 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
1228# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1229
1230 return u32Ret;
1231
1232# else /* !IEM_WITH_CODE_TLB */
1233 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1234 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
1235 {
1236 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
1237# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1238 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1239# else
1240 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1241 pVCpu->iem.s.abOpcode[offOpcode + 1],
1242 pVCpu->iem.s.abOpcode[offOpcode + 2],
1243 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1244# endif
1245 }
1246 return iemOpcodeGetNextU32SlowJmp(pVCpu);
1247# endif
1248}
1249
1250# endif /* IEM_WITH_SETJMP */
1251
1252/**
1253 * Fetches the next opcode dword, returns automatically on failure.
1254 *
1255 * @param a_pu32 Where to return the opcode dword.
1256 * @remark Implicitly references pVCpu.
1257 */
1258# ifndef IEM_WITH_SETJMP
1259# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
1260 do \
1261 { \
1262 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
1263 if (rcStrict2 != VINF_SUCCESS) \
1264 return rcStrict2; \
1265 } while (0)
1266# else
1267# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
1268# endif
1269
1270# ifndef IEM_WITH_SETJMP
1271/**
1272 * Fetches the next opcode dword, zero extending it to a quad word.
1273 *
1274 * @returns Strict VBox status code.
1275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1276 * @param pu64 Where to return the opcode quad word.
1277 */
1278DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1279{
1280 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1281 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1282 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
1283
1284 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1285 pVCpu->iem.s.abOpcode[offOpcode + 1],
1286 pVCpu->iem.s.abOpcode[offOpcode + 2],
1287 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1288 pVCpu->iem.s.offOpcode = offOpcode + 4;
1289 return VINF_SUCCESS;
1290}
1291# endif /* !IEM_WITH_SETJMP */
1292
1293/**
1294 * Fetches the next opcode dword and zero extends it to a quad word, returns
1295 * automatically on failure.
1296 *
1297 * @param a_pu64 Where to return the opcode quad word.
1298 * @remark Implicitly references pVCpu.
1299 */
1300# ifndef IEM_WITH_SETJMP
1301# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
1302 do \
1303 { \
1304 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
1305 if (rcStrict2 != VINF_SUCCESS) \
1306 return rcStrict2; \
1307 } while (0)
1308# else
1309# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
1310# endif
1311
1312
1313# ifndef IEM_WITH_SETJMP
1314/**
1315 * Fetches the next signed double word from the opcode stream.
1316 *
1317 * @returns Strict VBox status code.
1318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1319 * @param pi32 Where to return the signed double word.
1320 */
1321DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32) RT_NOEXCEPT
1322{
1323 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
1324}
1325# endif
1326
1327/**
1328 * Fetches the next signed double word from the opcode stream, returning
1329 * automatically on failure.
1330 *
1331 * @param a_pi32 Where to return the signed double word.
1332 * @remark Implicitly references pVCpu.
1333 */
1334# ifndef IEM_WITH_SETJMP
1335# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
1336 do \
1337 { \
1338 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
1339 if (rcStrict2 != VINF_SUCCESS) \
1340 return rcStrict2; \
1341 } while (0)
1342# else
1343# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1344# endif
1345
1346# ifndef IEM_WITH_SETJMP
1347/**
1348 * Fetches the next opcode dword, sign extending it into a quad word.
1349 *
1350 * @returns Strict VBox status code.
1351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1352 * @param pu64 Where to return the opcode quad word.
1353 */
1354DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1355{
1356 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1357 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
1358 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
1359
1360 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1361 pVCpu->iem.s.abOpcode[offOpcode + 1],
1362 pVCpu->iem.s.abOpcode[offOpcode + 2],
1363 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1364 *pu64 = (uint64_t)(int64_t)i32;
1365 pVCpu->iem.s.offOpcode = offOpcode + 4;
1366 return VINF_SUCCESS;
1367}
1368# endif /* !IEM_WITH_SETJMP */
1369
1370/**
1371 * Fetches the next opcode double word and sign extends it to a quad word,
1372 * returns automatically on failure.
1373 *
1374 * @param a_pu64 Where to return the opcode quad word.
1375 * @remark Implicitly references pVCpu.
1376 */
1377# ifndef IEM_WITH_SETJMP
1378# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
1379 do \
1380 { \
1381 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
1382 if (rcStrict2 != VINF_SUCCESS) \
1383 return rcStrict2; \
1384 } while (0)
1385# else
1386# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
1387# endif
1388
1389# ifndef IEM_WITH_SETJMP
1390
1391/**
1392 * Fetches the next opcode qword.
1393 *
1394 * @returns Strict VBox status code.
1395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1396 * @param pu64 Where to return the opcode qword.
1397 */
1398DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1399{
1400 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1401 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1402 {
1403# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1404 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1405# else
1406 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1407 pVCpu->iem.s.abOpcode[offOpcode + 1],
1408 pVCpu->iem.s.abOpcode[offOpcode + 2],
1409 pVCpu->iem.s.abOpcode[offOpcode + 3],
1410 pVCpu->iem.s.abOpcode[offOpcode + 4],
1411 pVCpu->iem.s.abOpcode[offOpcode + 5],
1412 pVCpu->iem.s.abOpcode[offOpcode + 6],
1413 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1414# endif
1415 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1416 return VINF_SUCCESS;
1417 }
1418 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
1419}
1420
1421# else /* IEM_WITH_SETJMP */
1422
1423/**
1424 * Fetches the next opcode qword, longjmp on error.
1425 *
1426 * @returns The opcode qword.
1427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1428 */
1429DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
1430{
1431# ifdef IEM_WITH_CODE_TLB
1432 uint64_t u64Ret;
1433 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1434 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1435 if (RT_LIKELY( pbBuf != NULL
1436 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
1437 {
1438 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
1439# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1440 u64Ret = *(uint64_t const *)&pbBuf[offBuf];
1441# else
1442 u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
1443 pbBuf[offBuf + 1],
1444 pbBuf[offBuf + 2],
1445 pbBuf[offBuf + 3],
1446 pbBuf[offBuf + 4],
1447 pbBuf[offBuf + 5],
1448 pbBuf[offBuf + 6],
1449 pbBuf[offBuf + 7]);
1450# endif
1451 }
1452 else
1453 u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);
1454
1455# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1456 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1457 Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
1458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1459 *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
1460# else
1461 pVCpu->iem.s.abOpcode[offOpcode] = RT_BYTE1(u64Ret);
1462 pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
1463 pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
1464 pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
1465 pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
1466 pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
1467 pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
1468 pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
1469# endif
1470 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
1471# endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */
1472
1473 return u64Ret;
1474
1475# else /* !IEM_WITH_CODE_TLB */
1476 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1477 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
1478 {
1479 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
1480# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1481 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1482# else
1483 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1484 pVCpu->iem.s.abOpcode[offOpcode + 1],
1485 pVCpu->iem.s.abOpcode[offOpcode + 2],
1486 pVCpu->iem.s.abOpcode[offOpcode + 3],
1487 pVCpu->iem.s.abOpcode[offOpcode + 4],
1488 pVCpu->iem.s.abOpcode[offOpcode + 5],
1489 pVCpu->iem.s.abOpcode[offOpcode + 6],
1490 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1491# endif
1492 }
1493 return iemOpcodeGetNextU64SlowJmp(pVCpu);
1494# endif /* !IEM_WITH_CODE_TLB */
1495}
1496
1497# endif /* IEM_WITH_SETJMP */
1498
1499/**
1500 * Fetches the next opcode quad word, returns automatically on failure.
1501 *
1502 * @param a_pu64 Where to return the opcode quad word.
1503 * @remark Implicitly references pVCpu.
1504 */
1505# ifndef IEM_WITH_SETJMP
1506# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
1507 do \
1508 { \
1509 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
1510 if (rcStrict2 != VINF_SUCCESS) \
1511 return rcStrict2; \
1512 } while (0)
1513# else
1514# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
1515# endif
1516
1517/**
1518 * For fetching the opcode bytes for a ModR/M effective address, but throwing
1519 * away the result.
1520 *
1521 * This is used when decoding undefined opcodes and such where we want to avoid
1522 * unnecessary MC blocks.
1523 *
1524 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
1525 * used instead. At least for now...
1526 */
1527# ifndef IEM_WITH_SETJMP
1528# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1529 RTGCPTR GCPtrEff; \
1530 VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); \
1531 if (rcStrict != VINF_SUCCESS) \
1532 return rcStrict; \
1533 } while (0)
1534# else
1535# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
1536 (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); \
1537 } while (0)
1538# endif
1539
1540#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1541
1542
1543/** @name Misc Worker Functions.
1544 * @{
1545 */
1546
1547/**
1548 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
1549 * not (kind of obsolete now).
1550 *
1551 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1552 */
1553#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
1554
1555/**
1556 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
1557 *
1558 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1559 * @param a_fEfl The new EFLAGS.
1560 */
1561#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
1562
1563
1564/**
1565 * Loads a NULL data selector into a selector register, both the hidden and
1566 * visible parts, in protected mode.
1567 *
1568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1569 * @param pSReg Pointer to the segment register.
1570 * @param uRpl The RPL.
1571 */
1572DECLINLINE(void) iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl) RT_NOEXCEPT
1573{
1574 /** @todo Testcase: write a testcase checking what happens when loading a NULL
1575 * data selector in protected mode. */
1576 pSReg->Sel = uRpl;
1577 pSReg->ValidSel = uRpl;
1578 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1579 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1580 {
1581 /* VT-x (Intel 3960x) observed doing something like this. */
1582 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (IEM_GET_CPL(pVCpu) << X86DESCATTR_DPL_SHIFT);
1583 pSReg->u32Limit = UINT32_MAX;
1584 pSReg->u64Base = 0;
1585 }
1586 else
1587 {
1588 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
1589 pSReg->u32Limit = 0;
1590 pSReg->u64Base = 0;
1591 }
1592}
1593
1594/** @} */
1595
1596
1597/*
1598 *
1599 * Helper routines.
1600 * Helper routines.
1601 * Helper routines.
1602 *
1603 */
1604
1605#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1606
1607/**
1608 * Recalculates the effective operand size.
1609 *
1610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1611 */
1612DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
1613{
1614 switch (IEM_GET_CPU_MODE(pVCpu))
1615 {
1616 case IEMMODE_16BIT:
1617 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
1618 break;
1619 case IEMMODE_32BIT:
1620 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
1621 break;
1622 case IEMMODE_64BIT:
1623 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
1624 {
1625 case 0:
1626 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
1627 break;
1628 case IEM_OP_PRF_SIZE_OP:
1629 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1630 break;
1631 case IEM_OP_PRF_SIZE_REX_W:
1632 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
1633 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1634 break;
1635 }
1636 break;
1637 default:
1638 AssertFailed();
1639 }
1640}
1641
1642
1643/**
1644 * Sets the default operand size to 64-bit and recalculates the effective
1645 * operand size.
1646 *
1647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1648 */
1649DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
1650{
1651 Assert(IEM_IS_64BIT_CODE(pVCpu));
1652 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1653 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
1654 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1655 else
1656 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1657}
1658
1659
1660/**
1661 * Sets the default operand size to 64-bit and recalculates the effective
1662 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
1663 *
1664 * This is for the relative jumps.
1665 *
1666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1667 */
1668DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
1669{
1670 Assert(IEM_IS_64BIT_CODE(pVCpu));
1671 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
1672 if ( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
1673 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
1674 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
1675 else
1676 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
1677}
1678
1679#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */
1680
1681
1682
1683/** @name Register Access.
1684 * @{
1685 */
1686
1687/**
1688 * Gets a reference (pointer) to the specified hidden segment register.
1689 *
1690 * @returns Hidden register reference.
1691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1692 * @param iSegReg The segment register.
1693 */
1694DECL_FORCE_INLINE(PCPUMSELREG) iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1695{
1696 Assert(iSegReg < X86_SREG_COUNT);
1697 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1698 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
1699
1700 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1701 return pSReg;
1702}
1703
1704
1705/**
1706 * Ensures that the given hidden segment register is up to date.
1707 *
1708 * @returns Hidden register reference.
1709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1710 * @param pSReg The segment register.
1711 */
1712DECL_FORCE_INLINE(PCPUMSELREG) iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg) RT_NOEXCEPT
1713{
1714 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
1715 NOREF(pVCpu);
1716 return pSReg;
1717}
1718
1719
1720/**
1721 * Gets a reference (pointer) to the specified segment register (the selector
1722 * value).
1723 *
1724 * @returns Pointer to the selector variable.
1725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1726 * @param iSegReg The segment register.
1727 */
1728DECL_FORCE_INLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1729{
1730 Assert(iSegReg < X86_SREG_COUNT);
1731 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1732 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1733}
1734
1735
1736/**
1737 * Fetches the selector value of a segment register.
1738 *
1739 * @returns The selector value.
1740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1741 * @param iSegReg The segment register.
1742 */
1743DECL_FORCE_INLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1744{
1745 Assert(iSegReg < X86_SREG_COUNT);
1746 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1747 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
1748}
1749
1750
1751/**
1752 * Fetches the base address value of a segment register.
1753 *
1754 * @returns The base address value.
1755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1756 * @param iSegReg The segment register.
1757 */
1758DECL_FORCE_INLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1759{
1760 Assert(iSegReg < X86_SREG_COUNT);
1761 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1762 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1763}
1764
1765
1766/**
1767 * Gets a reference (pointer) to the specified general purpose register.
1768 *
1769 * @returns Register reference.
1770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1771 * @param iReg The general purpose register.
1772 */
1773DECL_FORCE_INLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1774{
1775 Assert(iReg < 16);
1776 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
1777}
1778
1779
1780#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1781/**
1782 * Gets a reference (pointer) to the specified 8-bit general purpose register.
1783 *
1784 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
1785 *
1786 * @returns Register reference.
1787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1788 * @param iReg The register.
1789 */
1790DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1791{
1792 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)))
1793 {
1794 Assert(iReg < 16);
1795 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
1796 }
1797 /* high 8-bit register. */
1798 Assert(iReg < 8);
1799 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
1800}
1801#endif
1802
1803
1804/**
1805 * Gets a reference (pointer) to the specified 8-bit general purpose register,
1806 * alternative version with extended (20) register index.
1807 *
1808 * @returns Register reference.
1809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1810 * @param iRegEx The register. The first 16 are the regular ones,
1811 * whereas 16 thru 19 map to AH, CH, DH and BH.
1812 */
1813DECL_FORCE_INLINE(uint8_t *) iemGRegRefU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1814{
1815 /** @todo This could be done by double indexing on little endian hosts:
1816 * return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 15].ab[iRegEx >> 4]; */
1817 if (iRegEx < 16)
1818 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx].u8;
1819
1820 /* high 8-bit register. */
1821 Assert(iRegEx < 20);
1822 return &pVCpu->cpum.GstCtx.aGRegs[iRegEx & 3].bHi;
1823}
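
/* Illustrative sketch only, not part of the build: with the extended indexing
 * above, 0..15 address AL..R15B while 16..19 alias the legacy high byte
 * registers, e.g. index 16 resolves to AH (aGRegs[0].bHi).  pVCpu is assumed
 * to come from the surrounding decoder context. */
#if 0
    uint8_t const bAl = *iemGRegRefU8Ex(pVCpu, X86_GREG_xAX);      /* AL */
    uint8_t const bAh = *iemGRegRefU8Ex(pVCpu, 16 + X86_GREG_xAX); /* AH */
#endif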
1824
1825
1826/**
1827 * Gets a reference (pointer) to the specified 16-bit general purpose register.
1828 *
1829 * @returns Register reference.
1830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1831 * @param iReg The register.
1832 */
1833DECL_FORCE_INLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1834{
1835 Assert(iReg < 16);
1836 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1837}
1838
1839
1840/**
1841 * Gets a reference (pointer) to the specified 32-bit general purpose register.
1842 *
1843 * @returns Register reference.
1844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1845 * @param iReg The register.
1846 */
1847DECL_FORCE_INLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1848{
1849 Assert(iReg < 16);
1850 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1851}
1852
1853
1854/**
1855 * Gets a reference (pointer) to the specified signed 32-bit general purpose register.
1856 *
1857 * @returns Register reference.
1858 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1859 * @param iReg The register.
1860 */
1861DECL_FORCE_INLINE(int32_t *) iemGRegRefI32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1862{
1863 Assert(iReg < 16);
1864 return (int32_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1865}
1866
1867
1868/**
1869 * Gets a reference (pointer) to the specified 64-bit general purpose register.
1870 *
1871 * @returns Register reference.
1872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1873 * @param iReg The register.
1874 */
1875DECL_FORCE_INLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1876{
1877 Assert(iReg < 16);
1878 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1879}
1880
1881
1882/**
1883 * Gets a reference (pointer) to the specified signed 64-bit general purpose register.
1884 *
1885 * @returns Register reference.
1886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1887 * @param iReg The register.
1888 */
1889DECL_FORCE_INLINE(int64_t *) iemGRegRefI64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1890{
1891 Assert(iReg < 16);
1892 return (int64_t *)&pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1893}
1894
1895
1896/**
1897 * Gets a reference (pointer) to the specified segment register's base address.
1898 *
1899 * @returns Segment register base address reference.
1900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1901 * @param iSegReg The segment selector.
1902 */
1903DECL_FORCE_INLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg) RT_NOEXCEPT
1904{
1905 Assert(iSegReg < X86_SREG_COUNT);
1906 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
1907 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
1908}
1909
1910
1911#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1912/**
1913 * Fetches the value of an 8-bit general purpose register.
1914 *
1915 * @returns The register value.
1916 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1917 * @param iReg The register.
1918 */
1919DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1920{
1921 return *iemGRegRefU8(pVCpu, iReg);
1922}
1923#endif
1924
1925
1926/**
1927 * Fetches the value of an 8-bit general purpose register, alternative version
1928 * with extended (20) register index.
1929 *
1930 * @returns The register value.
1931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1932 * @param iRegEx The register. The first 16 are the regular ones,
1933 * whereas 16 thru 19 map to AH, CH, DH and BH.
1934 */
1935DECL_FORCE_INLINE(uint8_t) iemGRegFetchU8Ex(PVMCPUCC pVCpu, uint8_t iRegEx) RT_NOEXCEPT
1936{
1937 return *iemGRegRefU8Ex(pVCpu, iRegEx);
1938}
1939
1940
1941/**
1942 * Fetches the value of a 16-bit general purpose register.
1943 *
1944 * @returns The register value.
1945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1946 * @param iReg The register.
1947 */
1948DECL_FORCE_INLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1949{
1950 Assert(iReg < 16);
1951 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
1952}
1953
1954
1955/**
1956 * Fetches the value of a 32-bit general purpose register.
1957 *
1958 * @returns The register value.
1959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1960 * @param iReg The register.
1961 */
1962DECL_FORCE_INLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1963{
1964 Assert(iReg < 16);
1965 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
1966}
1967
1968
1969/**
1970 * Fetches the value of a 64-bit general purpose register.
1971 *
1972 * @returns The register value.
1973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1974 * @param iReg The register.
1975 */
1976DECL_FORCE_INLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg) RT_NOEXCEPT
1977{
1978 Assert(iReg < 16);
1979 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
1980}
1981
1982
1983/**
1984 * Stores a 16-bit value to a general purpose register.
1985 *
1986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1987 * @param iReg The register.
1988 * @param uValue The value to store.
1989 */
1990DECL_FORCE_INLINE(void) iemGRegStoreU16(PVMCPUCC pVCpu, uint8_t iReg, uint16_t uValue) RT_NOEXCEPT
1991{
1992 Assert(iReg < 16);
1993 pVCpu->cpum.GstCtx.aGRegs[iReg].u16 = uValue;
1994}
1995
1996
1997/**
1998 * Stores a 32-bit value to a general purpose register, implicitly clearing the
1999 * high 32 bits of the full 64-bit register.
2000 *
2001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2002 * @param iReg The register.
2003 * @param uValue The value to store.
2004 */
2005DECL_FORCE_INLINE(void) iemGRegStoreU32(PVMCPUCC pVCpu, uint8_t iReg, uint32_t uValue) RT_NOEXCEPT
2006{
2007 Assert(iReg < 16);
2008 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
2009}
2010
2011
2012/**
2013 * Stores a 64-bit value to a general purpose register.
2014 *
2015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2016 * @param iReg The register.
2017 * @param uValue The value to store.
2018 */
2019DECL_FORCE_INLINE(void) iemGRegStoreU64(PVMCPUCC pVCpu, uint8_t iReg, uint64_t uValue) RT_NOEXCEPT
2020{
2021 Assert(iReg < 16);
2022 pVCpu->cpum.GstCtx.aGRegs[iReg].u64 = uValue;
2023}
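
/* Illustrative sketch only, not part of the build: the store helpers above
 * follow x86-64 register write semantics: a 32-bit write zero-extends into the
 * full register, while a 16-bit write leaves the upper bits untouched.  pVCpu
 * is assumed to come from the surrounding context. */
#if 0
    iemGRegStoreU64(pVCpu, X86_GREG_xAX, UINT64_MAX);
    iemGRegStoreU32(pVCpu, X86_GREG_xAX, UINT32_C(0x12345678)); /* RAX == 0x0000000012345678 */
    iemGRegStoreU16(pVCpu, X86_GREG_xAX, UINT16_C(0xabcd));     /* RAX == 0x000000001234abcd */
#endif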
2024
2025
2026/**
2027 * Gets the address of the top of the stack.
2028 *
2029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2030 */
2031DECL_FORCE_INLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu) RT_NOEXCEPT
2032{
2033 if (IEM_IS_64BIT_CODE(pVCpu))
2034 return pVCpu->cpum.GstCtx.rsp;
2035 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
2036 return pVCpu->cpum.GstCtx.esp;
2037 return pVCpu->cpum.GstCtx.sp;
2038}
2039
2040
2041/**
2042 * Updates the RIP/EIP/IP to point to the next instruction.
2043 *
2044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2045 * @param cbInstr The number of bytes to add.
2046 */
2047DECL_FORCE_INLINE(void) iemRegAddToRip(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2048{
2049 /*
2050 * Advance RIP.
2051 *
2052 * When we're targeting 8086/8, 80186/8 or 80286 mode the updates are 16-bit,
2053 * while in all other modes except LM64 the updates are 32-bit. This means
2054 * we need to watch for both 32-bit and 16-bit "carry" situations, i.e.
2055 * 4GB and 64KB rollovers, and decide whether anything needs masking.
2056 *
2057 * See PC wrap around tests in bs3-cpu-weird-1.
2058 */
2059 uint64_t const uRipPrev = pVCpu->cpum.GstCtx.rip;
2060 uint64_t const uRipNext = uRipPrev + cbInstr;
2061 if (RT_LIKELY( !((uRipNext ^ uRipPrev) & (RT_BIT_64(32) | RT_BIT_64(16)))
2062 || IEM_IS_64BIT_CODE(pVCpu)))
2063 pVCpu->cpum.GstCtx.rip = uRipNext;
2064 else if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
2065 pVCpu->cpum.GstCtx.rip = (uint32_t)uRipNext;
2066 else
2067 pVCpu->cpum.GstCtx.rip = (uint16_t)uRipNext;
2068}
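
/* Illustrative sketch only, not part of the build: the XOR test above detects
 * a 64KB or 4GB boundary crossing with a single branch, e.g. the classic
 * 16-bit IP wrap-around case: */
#if 0
    uint64_t const uPrev    = UINT64_C(0xffff);     /* IP = 0xFFFF in 16-bit code.   */
    uint64_t const uNext    = uPrev + 2;            /* Unmasked result is 0x10001.   */
    bool     const fWrapped = RT_BOOL((uNext ^ uPrev) & (RT_BIT_64(32) | RT_BIT_64(16)));
    /* fWrapped is true here, so the result must be truncated to 16 (or 32) bits. */
#endif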
2069
2070
2071/**
2072 * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
2073 * following EFLAGS bits are set:
2074 * - X86_EFL_RF - clear it.
2075 * - CPUMCTX_INHIBIT_SHADOW (_SS/_STI) - clear them.
2076 * - X86_EFL_TF - generate single step \#DB trap.
2077 * - CPUMCTX_DBG_HIT_DR0/1/2/3 - generate \#DB trap (data or I/O, not
2078 * instruction).
2079 *
2080 * According to @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events},
2081 * a \#DB due to TF (single stepping) or a DRx non-instruction breakpoint
2082 * takes priority over both NMIs and hardware interrupts. So, neither is
2083 * considered here. (The RESET, \#MC, SMI, INIT, STOPCLK and FLUSH events are
2084 * either unsupported or will be triggered on top of any \#DB raised here.)
2085 *
2086 * The RF flag only needs to be cleared here as it only suppresses instruction
2087 * breakpoints which are not raised here (happens synchronously during
2088 * instruction fetching).
2089 *
2090 * The CPUMCTX_INHIBIT_SHADOW_SS flag will be cleared by this function, so its
2091 * status has no bearing on whether \#DB exceptions are raised.
2092 *
2093 * @note This must *NOT* be called by the two instructions setting the
2094 * CPUMCTX_INHIBIT_SHADOW_SS flag.
2095 *
2096 * @see @sdmv3{077,200,Table 6-2,Priority Among Concurrent Events}
2097 * @see @sdmv3{077,200,6.8.3,Masking Exceptions and Interrupts When Switching
2098 * Stacks}
2099 */
2100template<uint32_t const a_fTF = X86_EFL_TF>
2101static VBOXSTRICTRC iemFinishInstructionWithFlagsSet(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2102{
2103 /*
2104 * Normally we're just here to clear RF and/or interrupt shadow bits.
2105 */
2106 if (RT_LIKELY((pVCpu->cpum.GstCtx.eflags.uBoth & (a_fTF | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) == 0))
2107 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW);
2108 else
2109 {
2110 /*
2111 * Raise a #DB or/and DBGF event.
2112 */
2113 VBOXSTRICTRC rcStrict;
2114 if (pVCpu->cpum.GstCtx.eflags.uBoth & (a_fTF | CPUMCTX_DBG_HIT_DRX_MASK))
2115 {
2116 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
2117 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
2118 if (pVCpu->cpum.GstCtx.eflags.uBoth & a_fTF)
2119 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS;
2120 pVCpu->cpum.GstCtx.dr[6] |= (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
2121 >> CPUMCTX_DBG_HIT_DRX_SHIFT;
2122 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64\n",
2123 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
2124 pVCpu->cpum.GstCtx.rflags.uBoth));
2125
2126 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK);
2127 rcStrict = iemRaiseDebugException(pVCpu);
2128
2129 /* A DBGF event/breakpoint trumps the iemRaiseDebugException informational status code. */
2130 if ((pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK) && RT_FAILURE(rcStrict))
2131 {
2132 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2133 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2134 }
2135 }
2136 else
2137 {
2138 Assert(pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_MASK);
2139 rcStrict = pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_DBGF_BP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_EVENT;
2140 LogFlowFunc(("dbgf at %04X:%016llX: %Rrc\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict)));
2141 }
2142 pVCpu->cpum.GstCtx.eflags.uBoth &= ~CPUMCTX_DBG_DBGF_MASK;
2143 Assert(rcStrict != VINF_SUCCESS);
2144 return rcStrict;
2145 }
2146 return rcNormal;
2147}
2148
2149
2150/**
2151 * Clears the RF and CPUMCTX_INHIBIT_SHADOW, triggering \#DB if pending.
2152 *
2153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2154 * @param rcNormal VINF_SUCCESS to continue TB.
2155 * VINF_IEM_REEXEC_BREAK to force TB exit when
2156 * taking the wrong conditional branch.
2157 */
2158DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishClearingRF(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2159{
2160 /*
2161 * We assume that most of the time nothing actually needs doing here.
2162 */
2163 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2164 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
2165 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
2166 return rcNormal;
2167 return iemFinishInstructionWithFlagsSet(pVCpu, rcNormal);
2168}
2169
2170
2171/**
2172 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF
2173 * and CPUMCTX_INHIBIT_SHADOW.
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2176 * @param cbInstr The number of bytes to add.
2177 */
2178DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
2179{
2180 iemRegAddToRip(pVCpu, cbInstr);
2181 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
2182}
2183
2184
2185/**
2186 * Updates the RIP to point to the next instruction and clears EFLAGS.RF
2187 * and CPUMCTX_INHIBIT_SHADOW.
2188 *
2189 * Only called from 64-bit code.
2190 *
2191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2192 * @param cbInstr The number of bytes to add.
2193 * @param rcNormal VINF_SUCCESS to continue TB.
2194 * VINF_IEM_REEXEC_BREAK to force TB exit when
2195 * taking the wrong conditional branch.
2196 */
2197DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2198{
2199 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2200 return iemRegFinishClearingRF(pVCpu, rcNormal);
2201}
2202
2203
2204/**
2205 * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
2206 * CPUMCTX_INHIBIT_SHADOW.
2207 *
2208 * This is never called from 64-bit code.
2209 *
2210 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2211 * @param cbInstr The number of bytes to add.
2212 * @param rcNormal VINF_SUCCESS to continue TB.
2213 * VINF_IEM_REEXEC_BREAK to force TB exit when
2214 * taking the wrong conditional branch.
2215 */
2216DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2217{
2218 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2219 return iemRegFinishClearingRF(pVCpu, rcNormal);
2220}
2221
2222
2223/**
2224 * Updates the IP to point to the next instruction and clears EFLAGS.RF and
2225 * CPUMCTX_INHIBIT_SHADOW.
2226 *
2227 * This is only ever used from 16-bit code on a pre-386 CPU.
2228 *
2229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2230 * @param cbInstr The number of bytes to add.
2231 * @param rcNormal VINF_SUCCESS to continue TB.
2232 * VINF_IEM_REEXEC_BREAK to force TB exit when
2233 * taking the wrong conditional branch.
2234 */
2235DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2236{
2237 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2238 return iemRegFinishClearingRF(pVCpu, rcNormal);
2239}
2240
2241
2242/**
2243 * Tail method for a finish function that doesn't clear flags or raise \#DB.
2244 *
2245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2246 * @param rcNormal VINF_SUCCESS to continue TB.
2247 * VINF_IEM_REEXEC_BREAK to force TB exit when
2248 * taking the wrong conditional branch.
2249 */
2250DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegFinishNoFlags(PVMCPUCC pVCpu, int rcNormal) RT_NOEXCEPT
2251{
2252 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
2253 Assert(!( pVCpu->cpum.GstCtx.eflags.uBoth
2254 & (X86_EFL_TF | X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) );
2255 RT_NOREF(pVCpu);
2256 return rcNormal;
2257}
2258
2259
2260/**
2261 * Updates the RIP to point to the next instruction, but does not need to clear
2262 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2263 *
2264 * Only called from 64-bit code.
2265 *
2266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2267 * @param cbInstr The number of bytes to add.
2268 * @param rcNormal VINF_SUCCESS to continue TB.
2269 * VINF_IEM_REEXEC_BREAK to force TB exit when
2270 * taking the wrong conditional branch.
2271 */
2272DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2273{
2274 pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
2275 return iemRegFinishNoFlags(pVCpu, rcNormal);
2276}
2277
2278
2279/**
2280 * Updates the EIP to point to the next instruction, but does not need to clear
2281 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2282 *
2283 * This is never called from 64-bit code.
2284 *
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param cbInstr The number of bytes to add.
2287 * @param rcNormal VINF_SUCCESS to continue TB.
2288 * VINF_IEM_REEXEC_BREAK to force TB exit when
2289 * taking the wrong conditional branch.
2290 */
2291DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2292{
2293 pVCpu->cpum.GstCtx.rip = (uint32_t)(pVCpu->cpum.GstCtx.eip + cbInstr);
2294 return iemRegFinishNoFlags(pVCpu, rcNormal);
2295}
2296
2297
2298/**
2299 * Updates the IP to point to the next instruction, but does not need to clear
2300 * EFLAGS.RF or CPUMCTX_INHIBIT_SHADOW nor check for debug flags.
2301 *
2302 * This is only ever used from 16-bit code on a pre-386 CPU.
2303 *
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 * @param cbInstr The number of bytes to add.
2306 * @param rcNormal VINF_SUCCESS to continue TB.
2307 * VINF_IEM_REEXEC_BREAK to force TB exit when
2308 * taking the wrong conditional branch.
2309 *
2310 */
2311DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToIp16AndFinishingNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int rcNormal) RT_NOEXCEPT
2312{
2313 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr);
2314 return iemRegFinishNoFlags(pVCpu, rcNormal);
2315}
2316
2317
2318/**
2319 * Adds an 8-bit signed jump offset to RIP from 64-bit code.
2320 *
2321 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2322 * segment limit.
2323 *
2324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2325 * @param cbInstr Instruction size.
2326 * @param offNextInstr The offset of the next instruction.
2327 * @param enmEffOpSize Effective operand size.
2328 * @param rcNormal VINF_SUCCESS to continue TB.
2329 * VINF_IEM_REEXEC_BREAK to force TB exit when
2330 * taking the wrong conditional branch.
2331 */
2332DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2333 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2334{
2335 Assert(IEM_IS_64BIT_CODE(pVCpu));
2336 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2337
2338 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2339 if (enmEffOpSize == IEMMODE_16BIT)
2340 uNewRip &= UINT16_MAX;
2341
2342 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2343 pVCpu->cpum.GstCtx.rip = uNewRip;
2344 else
2345 return iemRaiseGeneralProtectionFault0(pVCpu);
2346
2347#ifndef IEM_WITH_CODE_TLB
2348 iemOpcodeFlushLight(pVCpu, cbInstr);
2349#endif
2350
2351 /*
2352 * Clear RF and finish the instruction (maybe raise #DB).
2353 */
2354 return iemRegFinishClearingRF(pVCpu, rcNormal);
2355}
2356
2357
2358/**
2359 * Adds an 8-bit signed jump offset to RIP from 64-bit code when the caller is
2360 * sure it stays within the same page.
2361 *
2362 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2363 * segment limit.
2364 *
2365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2366 * @param cbInstr Instruction size.
2367 * @param offNextInstr The offset of the next instruction.
2368 * @param enmEffOpSize Effective operand size.
2369 * @param rcNormal VINF_SUCCESS to continue TB.
2370 * VINF_IEM_REEXEC_BREAK to force TB exit when
2371 * taking the wrong conditional branch.
2372 */
2373DECL_FORCE_INLINE(VBOXSTRICTRC)
2374iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2375 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2376{
2377 Assert(IEM_IS_64BIT_CODE(pVCpu));
2378 Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
2379
2380 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2381 Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
2382 pVCpu->cpum.GstCtx.rip = uNewRip;
2383
2384#ifndef IEM_WITH_CODE_TLB
2385 iemOpcodeFlushLight(pVCpu, cbInstr);
2386#endif
2387
2388 /*
2389 * Clear RF and finish the instruction (maybe raise #DB).
2390 */
2391 return iemRegFinishClearingRF(pVCpu, rcNormal);
2392}
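
/* Illustrative sketch only, not part of the build: a caller is expected to
 * pick the IntraPg variant only after establishing that the jump source and
 * target share a guest page, which is what makes the canonical RIP check
 * redundant.  cbInstr, offNextInstr and rcStrict below are assumed to be
 * locals of that hypothetical caller. */
#if 0
    uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
    if ((uNewRip >> GUEST_PAGE_SHIFT) == (pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT))
        rcStrict = iemRegRip64RelativeJumpS8IntraPgAndFinishClearingRF(pVCpu, cbInstr, offNextInstr,
                                                                       IEMMODE_64BIT, VINF_SUCCESS);
    else
        rcStrict = iemRegRip64RelativeJumpS8AndFinishClearingRF(pVCpu, cbInstr, offNextInstr,
                                                                IEMMODE_64BIT, VINF_SUCCESS);
#endif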
2393
2394
2395/**
2396 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2397 * code (never 64-bit).
2398 *
2399 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2400 * segment limit.
2401 *
2402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2403 * @param cbInstr Instruction size.
2404 * @param offNextInstr The offset of the next instruction.
2405 * @param enmEffOpSize Effective operand size.
2406 * @param rcNormal VINF_SUCCESS to continue TB.
2407 * VINF_IEM_REEXEC_BREAK to force TB exit when
2408 * taking the wrong conditional branch.
2409 */
2410DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2411 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2412{
2413 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2414 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2415
2416 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2417 if (enmEffOpSize == IEMMODE_16BIT)
2418 uNewEip &= UINT16_MAX;
2419 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2420 pVCpu->cpum.GstCtx.rip = uNewEip;
2421 else
2422 return iemRaiseGeneralProtectionFault0(pVCpu);
2423
2424#ifndef IEM_WITH_CODE_TLB
2425 iemOpcodeFlushLight(pVCpu, cbInstr);
2426#endif
2427
2428 /*
2429 * Clear RF and finish the instruction (maybe raise #DB).
2430 */
2431 return iemRegFinishClearingRF(pVCpu, rcNormal);
2432}
2433
2434
2435/**
2436 * Adds an 8-bit signed jump offset to EIP, on 386 or later from FLAT 32-bit code
2437 * (never 64-bit).
2438 *
2439 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2440 * segment limit.
2441 *
2442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2443 * @param cbInstr Instruction size.
2444 * @param offNextInstr The offset of the next instruction.
2445 * @param enmEffOpSize Effective operand size.
2446 * @param rcNormal VINF_SUCCESS to continue TB.
2447 * VINF_IEM_REEXEC_BREAK to force TB exit when
2448 * taking the wrong conditional branch.
2449 */
2450DECL_FORCE_INLINE(VBOXSTRICTRC)
2451 iemRegEip32RelativeJumpS8FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2452 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2453{
2454 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2455 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2456
2457 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2458 if (enmEffOpSize == IEMMODE_16BIT)
2459 uNewEip &= UINT16_MAX;
2460 pVCpu->cpum.GstCtx.rip = uNewEip;
2461
2462#ifndef IEM_WITH_CODE_TLB
2463 iemOpcodeFlushLight(pVCpu, cbInstr);
2464#endif
2465
2466 /*
2467 * Clear RF and finish the instruction (maybe raise #DB).
2468 */
2469 return iemRegFinishClearingRF(pVCpu, rcNormal);
2470}
2471
2472
2473/**
2474 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU.
2475 *
2476 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2477 * segment limit.
2478 *
2479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2480 * @param cbInstr Instruction size.
2481 * @param offNextInstr The offset of the next instruction.
2482 * @param rcNormal VINF_SUCCESS to continue TB.
2483 * VINF_IEM_REEXEC_BREAK to force TB exit when
2484 * taking the wrong conditional branch.
2485 */
2486DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2487 int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
2488{
2489 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2490
2491 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2492 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2493 pVCpu->cpum.GstCtx.rip = uNewIp;
2494 else
2495 return iemRaiseGeneralProtectionFault0(pVCpu);
2496
2497#ifndef IEM_WITH_CODE_TLB
2498 iemOpcodeFlushLight(pVCpu, cbInstr);
2499#endif
2500
2501 /*
2502 * Clear RF and finish the instruction (maybe raise #DB).
2503 */
2504 return iemRegFinishClearingRF(pVCpu, rcNormal);
2505}
2506
2507
2508/**
2509 * Adds an 8-bit signed jump offset to RIP from 64-bit code, no checking or
2510 * clearing of flags.
2511 *
2512 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2513 * segment limit.
2514 *
2515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2516 * @param cbInstr Instruction size.
2517 * @param offNextInstr The offset of the next instruction.
2518 * @param enmEffOpSize Effective operand size.
2519 * @param rcNormal VINF_SUCCESS to continue TB.
2520 * VINF_IEM_REEXEC_BREAK to force TB exit when
2521 * taking the wrong conditional branch.
2522 */
2523DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2524 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2525{
2526 Assert(IEM_IS_64BIT_CODE(pVCpu));
2527 Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
2528
2529 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2530 if (enmEffOpSize == IEMMODE_16BIT)
2531 uNewRip &= UINT16_MAX;
2532
2533 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2534 pVCpu->cpum.GstCtx.rip = uNewRip;
2535 else
2536 return iemRaiseGeneralProtectionFault0(pVCpu);
2537
2538#ifndef IEM_WITH_CODE_TLB
2539 iemOpcodeFlushLight(pVCpu, cbInstr);
2540#endif
2541 return iemRegFinishNoFlags(pVCpu, rcNormal);
2542}
2543
2544
2545/**
2546 * Adds an 8-bit signed jump offset to RIP from 64-bit code when the caller is sure
2547 * it stays within the same page, no checking or clearing of flags.
2548 *
2549 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2550 * segment limit.
2551 *
2552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2553 * @param cbInstr Instruction size.
2554 * @param offNextInstr The offset of the next instruction.
2555 * @param enmEffOpSize Effective operand size.
2556 * @param rcNormal VINF_SUCCESS to continue TB.
2557 * VINF_IEM_REEXEC_BREAK to force TB exit when
2558 * taking the wrong conditional branch.
2559 */
2560DECL_FORCE_INLINE(VBOXSTRICTRC)
2561iemRegRip64RelativeJumpS8IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2562 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2563{
2564 Assert(IEM_IS_64BIT_CODE(pVCpu));
2565 Assert(enmEffOpSize == IEMMODE_64BIT); RT_NOREF(enmEffOpSize);
2566
2567 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2568 Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
2569 pVCpu->cpum.GstCtx.rip = uNewRip;
2570
2571#ifndef IEM_WITH_CODE_TLB
2572 iemOpcodeFlushLight(pVCpu, cbInstr);
2573#endif
2574 return iemRegFinishNoFlags(pVCpu, rcNormal);
2575}
2576
2577
2578/**
2579 * Adds an 8-bit signed jump offset to EIP, on 386 or later from 16-bit or 32-bit
2580 * code (never 64-bit), no checking or clearing of flags.
2581 *
2582 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2583 * segment limit.
2584 *
2585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2586 * @param cbInstr Instruction size.
2587 * @param offNextInstr The offset of the next instruction.
2588 * @param enmEffOpSize Effective operand size.
2589 * @param rcNormal VINF_SUCCESS to continue TB.
2590 * VINF_IEM_REEXEC_BREAK to force TB exit when
2591 * taking the wrong conditional branch.
2592 */
2593DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2594 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2595{
2596 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2597 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2598
2599 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2600 if (enmEffOpSize == IEMMODE_16BIT)
2601 uNewEip &= UINT16_MAX;
2602 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2603 pVCpu->cpum.GstCtx.rip = uNewEip;
2604 else
2605 return iemRaiseGeneralProtectionFault0(pVCpu);
2606
2607#ifndef IEM_WITH_CODE_TLB
2608 iemOpcodeFlushLight(pVCpu, cbInstr);
2609#endif
2610 return iemRegFinishNoFlags(pVCpu, rcNormal);
2611}
2612
2613
2614/**
2615 * Adds an 8-bit signed jump offset to EIP, on 386 or later from flat 32-bit code
2616 * (never 64-bit), no checking or clearing of flags.
2617 *
2618 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2619 * segment limit.
2620 *
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 * @param cbInstr Instruction size.
2623 * @param offNextInstr The offset of the next instruction.
2624 * @param enmEffOpSize Effective operand size.
2625 * @param rcNormal VINF_SUCCESS to continue TB.
2626 * VINF_IEM_REEXEC_BREAK to force TB exit when
2627 * taking the wrong conditional branch.
2628 */
2629DECL_FORCE_INLINE(VBOXSTRICTRC)
2630iemRegEip32RelativeJumpS8FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
2631 IEMMODE enmEffOpSize, int rcNormal) RT_NOEXCEPT
2632{
2633 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2634 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2635
2636 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
2637 if (enmEffOpSize == IEMMODE_16BIT)
2638 uNewEip &= UINT16_MAX;
2639 pVCpu->cpum.GstCtx.rip = uNewEip;
2640
2641#ifndef IEM_WITH_CODE_TLB
2642 iemOpcodeFlushLight(pVCpu, cbInstr);
2643#endif
2644 return iemRegFinishNoFlags(pVCpu, rcNormal);
2645}
2646
2647
2648/**
2649 * Adds an 8-bit signed jump offset to IP, on a pre-386 CPU, no checking or
2650 * clearing of flags.
2651 *
2652 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2653 * segment limit.
2654 *
2655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2656 * @param cbInstr Instruction size.
2657 * @param offNextInstr The offset of the next instruction.
2658 * @param rcNormal VINF_SUCCESS to continue TB.
2659 * VINF_IEM_REEXEC_BREAK to force TB exit when
2660 * taking the wrong conditional branch.
2661 */
2662DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegIp16RelativeJumpS8AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2663 int8_t offNextInstr, int rcNormal) RT_NOEXCEPT
2664{
2665 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2666
2667 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
2668 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2669 pVCpu->cpum.GstCtx.rip = uNewIp;
2670 else
2671 return iemRaiseGeneralProtectionFault0(pVCpu);
2672
2673#ifndef IEM_WITH_CODE_TLB
2674 iemOpcodeFlushLight(pVCpu, cbInstr);
2675#endif
2676 return iemRegFinishNoFlags(pVCpu, rcNormal);
2677}
2678
2679
2680/**
2681 * Adds a 16-bit signed jump offset to RIP from 64-bit code.
2682 *
2683 * @returns Strict VBox status code.
2684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2685 * @param cbInstr Instruction size.
2686 * @param offNextInstr The offset of the next instruction.
2687 * @param rcNormal VINF_SUCCESS to continue TB.
2688 * VINF_IEM_REEXEC_BREAK to force TB exit when
2689 * taking the wrong conditional branch.
2690 */
2691DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2692 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2693{
2694 Assert(IEM_IS_64BIT_CODE(pVCpu));
2695
2696 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2697
2698#ifndef IEM_WITH_CODE_TLB
2699 iemOpcodeFlushLight(pVCpu, cbInstr);
2700#endif
2701
2702 /*
2703 * Clear RF and finish the instruction (maybe raise #DB).
2704 */
2705 return iemRegFinishClearingRF(pVCpu, rcNormal);
2706}
2707
2708
2709/**
2710 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code.
2711 *
2712 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2713 * segment limit.
2714 *
2715 * @returns Strict VBox status code.
2716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2717 * @param cbInstr Instruction size.
2718 * @param offNextInstr The offset of the next instruction.
2719 * @param rcNormal VINF_SUCCESS to continue TB.
2720 * VINF_IEM_REEXEC_BREAK to force TB exit when
2721 * taking the wrong conditional branch.
2722 *
2723 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2724 * identical.
2725 */
2726DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2727 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2728{
2729 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2730
2731 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2732 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2733 pVCpu->cpum.GstCtx.rip = uNewIp;
2734 else
2735 return iemRaiseGeneralProtectionFault0(pVCpu);
2736
2737#ifndef IEM_WITH_CODE_TLB
2738 iemOpcodeFlushLight(pVCpu, cbInstr);
2739#endif
2740
2741 /*
2742 * Clear RF and finish the instruction (maybe raise #DB).
2743 */
2744 return iemRegFinishClearingRF(pVCpu, rcNormal);
2745}
2746
2747
2748/**
2749 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code.
2750 *
2751 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2752 * segment limit.
2753 *
2754 * @returns Strict VBox status code.
2755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2756 * @param cbInstr Instruction size.
2757 * @param offNextInstr The offset of the next instruction.
2758 * @param rcNormal VINF_SUCCESS to continue TB.
2759 * VINF_IEM_REEXEC_BREAK to force TB exit when
2760 * taking the wrong conditional branch.
2761 *
2762 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2763 * identical.
2764 */
2765DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2766 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2767{
2768 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2769
2770 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2771 pVCpu->cpum.GstCtx.rip = uNewIp;
2772
2773#ifndef IEM_WITH_CODE_TLB
2774 iemOpcodeFlushLight(pVCpu, cbInstr);
2775#endif
2776
2777 /*
2778 * Clear RF and finish the instruction (maybe raise #DB).
2779 */
2780 return iemRegFinishClearingRF(pVCpu, rcNormal);
2781}
2782
2783
2784/**
2785 * Adds a 16-bit signed jump offset to RIP from 64-bit code, no checking or
2786 * clearing of flags.
2787 *
2788 * @returns Strict VBox status code.
2789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2790 * @param cbInstr Instruction size.
2791 * @param offNextInstr The offset of the next instruction.
2792 * @param rcNormal VINF_SUCCESS to continue TB.
2793 * VINF_IEM_REEXEC_BREAK to force TB exit when
2794 * taking the wrong conditional branch.
2795 */
2796DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2797 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2798{
2799 Assert(IEM_IS_64BIT_CODE(pVCpu));
2800
2801 pVCpu->cpum.GstCtx.rip = (uint16_t)(pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr);
2802
2803#ifndef IEM_WITH_CODE_TLB
2804 iemOpcodeFlushLight(pVCpu, cbInstr);
2805#endif
2806 return iemRegFinishNoFlags(pVCpu, rcNormal);
2807}
2808
2809
2810/**
2811 * Adds a 16-bit signed jump offset to EIP from 16-bit or 32-bit code,
2812 * no checking or clearing of flags.
2813 *
2814 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2815 * segment limit.
2816 *
2817 * @returns Strict VBox status code.
2818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2819 * @param cbInstr Instruction size.
2820 * @param offNextInstr The offset of the next instruction.
2821 * @param rcNormal VINF_SUCCESS to continue TB.
2822 * VINF_IEM_REEXEC_BREAK to force TB exit when
2823 * taking the wrong conditional branch.
2824 *
2825 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2826 * identical.
2827 */
2828DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2829 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2830{
2831 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2832
2833 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2834 if (RT_LIKELY(uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit))
2835 pVCpu->cpum.GstCtx.rip = uNewIp;
2836 else
2837 return iemRaiseGeneralProtectionFault0(pVCpu);
2838
2839#ifndef IEM_WITH_CODE_TLB
2840 iemOpcodeFlushLight(pVCpu, cbInstr);
2841#endif
2842 return iemRegFinishNoFlags(pVCpu, rcNormal);
2843}
2844
2845
2846/**
2847 * Adds a 16-bit signed jump offset to EIP from FLAT 32-bit code, no checking or
2848 * clearing of flags.
2849 *
2850 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2851 * segment limit.
2852 *
2853 * @returns Strict VBox status code.
2854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2855 * @param cbInstr Instruction size.
2856 * @param offNextInstr The offset of the next instruction.
2857 * @param rcNormal VINF_SUCCESS to continue TB.
2858 * VINF_IEM_REEXEC_BREAK to force TB exit when
2859 * taking the wrong conditional branch.
2860 *
2861 * @note This is also used by 16-bit code in pre-386 mode, as the code is
2862 * identical.
2863 */
2864DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS16FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
2865 int16_t offNextInstr, int rcNormal) RT_NOEXCEPT
2866{
2867 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2868
2869 uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
2870 pVCpu->cpum.GstCtx.rip = uNewIp;
2871
2872#ifndef IEM_WITH_CODE_TLB
2873 iemOpcodeFlushLight(pVCpu, cbInstr);
2874#endif
2875 return iemRegFinishNoFlags(pVCpu, rcNormal);
2876}
2877
2878
2879/**
2880 * Adds a 32-bit signed jump offset to RIP from 64-bit code.
2881 *
2882 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2883 * segment limit.
2884 *
2885 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2886 * only alternative for relative jumps in 64-bit code and that is already
2887 * handled in the decoder stage.
2888 *
2889 * @returns Strict VBox status code.
2890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2891 * @param cbInstr Instruction size.
2892 * @param offNextInstr The offset of the next instruction.
2893 * @param rcNormal VINF_SUCCESS to continue TB.
2894 * VINF_IEM_REEXEC_BREAK to force TB exit when
2895 * taking the wrong conditional branch.
2896 */
2897DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2898 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2899{
2900 Assert(IEM_IS_64BIT_CODE(pVCpu));
2901
2902 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2903 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
2904 pVCpu->cpum.GstCtx.rip = uNewRip;
2905 else
2906 return iemRaiseGeneralProtectionFault0(pVCpu);
2907
2908#ifndef IEM_WITH_CODE_TLB
2909 iemOpcodeFlushLight(pVCpu, cbInstr);
2910#endif
2911
2912 /*
2913 * Clear RF and finish the instruction (maybe raise #DB).
2914 */
2915 return iemRegFinishClearingRF(pVCpu, rcNormal);
2916}
2917
2918
2919/**
2920 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
2921 * sure the target is in the same page.
2922 *
2923 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2924 * segment limit.
2925 *
2926 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
2927 * only alternative for relative jumps in 64-bit code and that is already
2928 * handled in the decoder stage.
2929 *
2930 * @returns Strict VBox status code.
2931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2932 * @param cbInstr Instruction size.
2933 * @param offNextInstr The offset of the next instruction.
2934 * @param rcNormal VINF_SUCCESS to continue TB.
2935 * VINF_IEM_REEXEC_BREAK to force TB exit when
2936 * taking the wrong conditional branch.
2937 */
2938DECL_FORCE_INLINE(VBOXSTRICTRC)
2939iemRegRip64RelativeJumpS32IntraPgAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2940 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2941{
2942 Assert(IEM_IS_64BIT_CODE(pVCpu));
2943
2944 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
2945 Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
2946 pVCpu->cpum.GstCtx.rip = uNewRip;
2947
2948#ifndef IEM_WITH_CODE_TLB
2949 iemOpcodeFlushLight(pVCpu, cbInstr);
2950#endif
2951
2952 /*
2953 * Clear RF and finish the instruction (maybe raise #DB).
2954 */
2955 return iemRegFinishClearingRF(pVCpu, rcNormal);
2956}
2957
2958
2959/**
2960 * Adds a 32-bit signed jump offset to EIP from 16-bit or 32-bit code.
2961 *
2962 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
2963 * segment limit.
2964 *
2965 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
2966 * only alternative for relative jumps in 32-bit code and that is already
2967 * handled in the decoder stage.
2968 *
2969 * @returns Strict VBox status code.
2970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2971 * @param cbInstr Instruction size.
2972 * @param offNextInstr The offset of the next instruction.
2973 * @param rcNormal VINF_SUCCESS to continue TB.
2974 * VINF_IEM_REEXEC_BREAK to force TB exit when
2975 * taking the wrong conditional branch.
2976 */
2977DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
2978 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
2979{
2980 Assert(!IEM_IS_64BIT_CODE(pVCpu));
2981 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
2982
2983 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
2984 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
2985 pVCpu->cpum.GstCtx.rip = uNewEip;
2986 else
2987 return iemRaiseGeneralProtectionFault0(pVCpu);
2988
2989#ifndef IEM_WITH_CODE_TLB
2990 iemOpcodeFlushLight(pVCpu, cbInstr);
2991#endif
2992
2993 /*
2994 * Clear RF and finish the instruction (maybe raise #DB).
2995 */
2996 return iemRegFinishClearingRF(pVCpu, rcNormal);
2997}
2998
2999
3000/**
3001 * Adds a 32-bit signed jump offset to EIP from FLAT 32-bit code.
3002 *
3003 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3004 * segment limit.
3005 *
3006 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
3007 * only alternative for relative jumps in 32-bit code and that is already
3008 * handled in the decoder stage.
3009 *
3010 * @returns Strict VBox status code.
3011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3012 * @param cbInstr Instruction size.
3013 * @param offNextInstr The offset of the next instruction.
3014 * @param rcNormal VINF_SUCCESS to continue TB.
3015 * VINF_IEM_REEXEC_BREAK to force TB exit when
3016 * taking the wrong conditional branch.
3017 */
3018DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr,
3019 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
3020{
3021 Assert(!IEM_IS_64BIT_CODE(pVCpu));
3022 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
3023
3024 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
3025 pVCpu->cpum.GstCtx.rip = uNewEip;
3026
3027#ifndef IEM_WITH_CODE_TLB
3028 iemOpcodeFlushLight(pVCpu, cbInstr);
3029#endif
3030
3031 /*
3032 * Clear RF and finish the instruction (maybe raise #DB).
3033 */
3034 return iemRegFinishClearingRF(pVCpu, rcNormal);
3035}
3036
3037
3038
3039/**
3040 * Adds a 32-bit signed jump offset to RIP from 64-bit code, no checking or
3041 * clearing of flags.
3042 *
3043 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3044 * segment limit.
3045 *
3046 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
3047 * only alternative for relative jumps in 64-bit code and that is already
3048 * handled in the decoder stage.
3049 *
3050 * @returns Strict VBox status code.
3051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3052 * @param cbInstr Instruction size.
3053 * @param offNextInstr The offset of the next instruction.
3054 * @param rcNormal VINF_SUCCESS to continue TB.
3055 * VINF_IEM_REEXEC_BREAK to force TB exit when
3056 * taking the wrong conditional branhc.
3057 */
3058DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegRip64RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
3059 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
3060{
3061 Assert(IEM_IS_64BIT_CODE(pVCpu));
3062
3063 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
3064 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
3065 pVCpu->cpum.GstCtx.rip = uNewRip;
3066 else
3067 return iemRaiseGeneralProtectionFault0(pVCpu);
3068
3069#ifndef IEM_WITH_CODE_TLB
3070 iemOpcodeFlushLight(pVCpu, cbInstr);
3071#endif
3072 return iemRegFinishNoFlags(pVCpu, rcNormal);
3073}
3074
3075
3076/**
3077 * Adds a 32-bit signed jump offset to RIP from 64-bit code when the caller is
3078 * sure it stays within the same page, no checking or clearing of flags.
3079 *
3080 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3081 * segment limit.
3082 *
3083 * We ASSUME that the effective operand size is 64-bit here, as 16-bit is the
3084 * only alternative for relative jumps in 64-bit code and that is already
3085 * handled in the decoder stage.
3086 *
3087 * @returns Strict VBox status code.
3088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3089 * @param cbInstr Instruction size.
3090 * @param offNextInstr The offset of the next instruction.
3091 * @param rcNormal VINF_SUCCESS to continue TB.
3092 * VINF_IEM_REEXEC_BREAK to force TB exit when
3093 * taking the wrong conditional branch.
3094 */
3095DECL_FORCE_INLINE(VBOXSTRICTRC)
3096iemRegRip64RelativeJumpS32IntraPgAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
3097{
3098 Assert(IEM_IS_64BIT_CODE(pVCpu));
3099
3100 uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
3101 Assert((pVCpu->cpum.GstCtx.rip >> GUEST_PAGE_SHIFT) == (uNewRip >> GUEST_PAGE_SHIFT));
3102 pVCpu->cpum.GstCtx.rip = uNewRip;
3103
3104#ifndef IEM_WITH_CODE_TLB
3105 iemOpcodeFlushLight(pVCpu, cbInstr);
3106#endif
3107 return iemRegFinishNoFlags(pVCpu, rcNormal);
3108}
3109
3110
3111/**
3112 * Adds a 32-bit signed jump offset to EIP from 32-bit code, no checking or
3113 * clearing of flags.
3114 *
3115 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3116 * segment limit.
3117 *
3118 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
3119 * only alternative for relative jumps in 32-bit code and that is already
3120 * handled in the decoder stage.
3121 *
3122 * @returns Strict VBox status code.
3123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3124 * @param cbInstr Instruction size.
3125 * @param offNextInstr The offset of the next instruction.
3126 * @param rcNormal VINF_SUCCESS to continue TB.
3127 * VINF_IEM_REEXEC_BREAK to force TB exit when
3128 * taking the wrong conditional branch.
3129 */
3130DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
3131 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
3132{
3133 Assert(!IEM_IS_64BIT_CODE(pVCpu));
3134 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
3135
3136 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
3137 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
3138 pVCpu->cpum.GstCtx.rip = uNewEip;
3139 else
3140 return iemRaiseGeneralProtectionFault0(pVCpu);
3141
3142#ifndef IEM_WITH_CODE_TLB
3143 iemOpcodeFlushLight(pVCpu, cbInstr);
3144#endif
3145 return iemRegFinishNoFlags(pVCpu, rcNormal);
3146}
3147
3148
3149/**
3150 * Adds a 32-bit signed jump offset to EIP from FLAT 32-bit code, no checking or
3151 * clearing of flags.
3152 *
3153 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3154 * segment limit.
3155 *
3156 * We ASSUME that the effective operand size is 32-bit here, as 16-bit is the
3157 * only alternative for relative jumps in 32-bit code and that is already
3158 * handled in the decoder stage.
3159 *
3160 * @returns Strict VBox status code.
3161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3162 * @param cbInstr Instruction size.
3163 * @param offNextInstr The offset of the next instruction.
3164 * @param rcNormal VINF_SUCCESS to continue TB.
3165 * VINF_IEM_REEXEC_BREAK to force TB exit when
3166 * taking the wrong conditional branch.
3167 */
3168DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegEip32RelativeJumpS32FlatAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr,
3169 int32_t offNextInstr, int rcNormal) RT_NOEXCEPT
3170{
3171 Assert(!IEM_IS_64BIT_CODE(pVCpu));
3172 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
3173
3174 uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
3175 pVCpu->cpum.GstCtx.rip = uNewEip;
3176
3177#ifndef IEM_WITH_CODE_TLB
3178 iemOpcodeFlushLight(pVCpu, cbInstr);
3179#endif
3180 return iemRegFinishNoFlags(pVCpu, rcNormal);
3181}
3182
3183
3184/**
3185 * Extended version of iemFinishInstructionWithFlagsSet that goes with
3186 * iemRegAddToRipAndFinishingClearingRfEx.
3187 *
3188 * See iemFinishInstructionWithFlagsSet() for details.
3189 */
3190static VBOXSTRICTRC iemFinishInstructionWithTfSet(PVMCPUCC pVCpu) RT_NOEXCEPT
3191{
3192 /*
3193 * Raise a #DB.
3194 */
3195 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR6);
3196 pVCpu->cpum.GstCtx.dr[6] &= ~X86_DR6_B_MASK;
3197 pVCpu->cpum.GstCtx.dr[6] |= X86_DR6_BS
3198 | ( (pVCpu->cpum.GstCtx.eflags.uBoth & CPUMCTX_DBG_HIT_DRX_MASK_NONSILENT)
3199 >> CPUMCTX_DBG_HIT_DRX_SHIFT);
3200 /** @todo Do we set all pending \#DB events, or just one? */
3201 LogFlowFunc(("Guest #DB fired at %04X:%016llX: DR6=%08X, RFLAGS=%16RX64 (popf)\n",
3202 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (unsigned)pVCpu->cpum.GstCtx.dr[6],
3203 pVCpu->cpum.GstCtx.rflags.uBoth));
3204 pVCpu->cpum.GstCtx.eflags.uBoth &= ~(X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
3205 return iemRaiseDebugException(pVCpu);
3206}
3207
3208
3209/**
3210 * Extended version of iemRegAddToRipAndFinishingClearingRF for use by POPF and
3211 * others potentially updating EFLAGS.TF.
3212 *
3213 * The single step event must be generated using the TF value at the start of
3214 * the instruction, not the new value set by it.
3215 *
3216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3217 * @param cbInstr The number of bytes to add.
3218 * @param fEflOld The EFLAGS at the start of the instruction
3219 * execution.
3220 */
3221DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRfEx(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t fEflOld) RT_NOEXCEPT
3222{
3223 iemRegAddToRip(pVCpu, cbInstr);
3224 if (!(fEflOld & X86_EFL_TF))
3225 {
3226 /* Specialized iemRegFinishClearingRF edition here that doesn't check X86_EFL_TF. */
3227 AssertCompile(CPUMCTX_INHIBIT_SHADOW < UINT32_MAX);
3228 if (RT_LIKELY(!( pVCpu->cpum.GstCtx.eflags.uBoth
3229 & (X86_EFL_RF | CPUMCTX_INHIBIT_SHADOW | CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK)) ))
3230 return VINF_SUCCESS;
3231 return iemFinishInstructionWithFlagsSet<0 /*a_fTF*/>(pVCpu, VINF_SUCCESS); /* TF=0, so ignore it. */
3232 }
3233 return iemFinishInstructionWithTfSet(pVCpu);
3234}
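
/*
 * Usage sketch (not compiled): a POPF-like implementation must latch EFLAGS
 * before it writes the new value, because the single-step decision is made
 * from TF as it was at the start of the instruction.  The function name below
 * is made up for illustration only.
 */
#if 0
static VBOXSTRICTRC iemSketchPopfLike(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    uint32_t const fEflOld = (uint32_t)pVCpu->cpum.GstCtx.eflags.uBoth;      /* latch TF before changing it */
    /* ... fetch and apply the new EFLAGS value here (may set or clear TF) ... */
    return iemRegAddToRipAndFinishingClearingRfEx(pVCpu, cbInstr, fEflOld);  /* decides on fEflOld, not the new TF */
}
#endif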
3235
3236
3237#ifndef IEM_WITH_OPAQUE_DECODER_STATE
3238/**
3239 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
3240 *
3241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3242 */
3243DECLINLINE(VBOXSTRICTRC) iemRegUpdateRipAndFinishClearingRF(PVMCPUCC pVCpu) RT_NOEXCEPT
3244{
3245 return iemRegAddToRipAndFinishingClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
3246}
3247#endif
3248
3249
3250#ifdef IEM_WITH_CODE_TLB
3251
3252/**
3253 * Performs a near jump to the specified address, no checking or clearing of
3254 * flags.
3255 *
3256 * May raise a \#GP(0) if the new IP is outside the code segment limit.
3257 *
3258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3259 * @param uNewIp The new IP value.
3260 */
3261DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishNoFlags(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
3262{
3263 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
3264 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
3265 pVCpu->cpum.GstCtx.rip = uNewIp;
3266 else
3267 return iemRaiseGeneralProtectionFault0(pVCpu);
3268 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3269}
3270
3271
3272/**
3273 * Performs a near jump to the specified address, no checking or clearing of
3274 * flags.
3275 *
3276 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
3277 *
3278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3279 * @param uNewEip The new EIP value.
3280 */
3281DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishNoFlags(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
3282{
3283 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
3284 Assert(!IEM_IS_64BIT_CODE(pVCpu));
3285 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
3286 pVCpu->cpum.GstCtx.rip = uNewEip;
3287 else
3288 return iemRaiseGeneralProtectionFault0(pVCpu);
3289 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3290}
3291
3292
3293/**
3294 * Performs a near jump to the specified address, no checking or clearing of
3295 * flags.
3296 *
3297 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3298 * segment limit.
3299 *
3300 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3301 * @param uNewRip The new RIP value.
3302 */
3303DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishNoFlags(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
3304{
3305 Assert(IEM_IS_64BIT_CODE(pVCpu));
3306 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
3307 pVCpu->cpum.GstCtx.rip = uNewRip;
3308 else
3309 return iemRaiseGeneralProtectionFault0(pVCpu);
3310 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3311}
3312
3313#endif /* IEM_WITH_CODE_TLB */
3314
3315/**
3316 * Performs a near jump to the specified address.
3317 *
3318 * May raise a \#GP(0) if the new IP is outside the code segment limit.
3319 *
3320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3321 * @param uNewIp The new IP value.
3322 * @param cbInstr The instruction length, for flushing in the non-TLB case.
3323 */
3324DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishClearingRF(PVMCPUCC pVCpu, uint16_t uNewIp, uint8_t cbInstr) RT_NOEXCEPT
3325{
3326 if (RT_LIKELY( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
3327 || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
3328 pVCpu->cpum.GstCtx.rip = uNewIp;
3329 else
3330 return iemRaiseGeneralProtectionFault0(pVCpu);
3331#ifndef IEM_WITH_CODE_TLB
3332 iemOpcodeFlushLight(pVCpu, cbInstr);
3333#else
3334 RT_NOREF_PV(cbInstr);
3335#endif
3336 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3337}
3338
3339
3340/**
3341 * Performs a near jump to the specified address.
3342 *
3343 * May raise a \#GP(0) if the new RIP is outside the code segment limit.
3344 *
3345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3346 * @param uNewEip The new EIP value.
3347 * @param cbInstr The instruction length, for flushing in the non-TLB case.
3348 */
3349DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishClearingRF(PVMCPUCC pVCpu, uint32_t uNewEip, uint8_t cbInstr) RT_NOEXCEPT
3350{
3351 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
3352 Assert(!IEM_IS_64BIT_CODE(pVCpu));
3353 if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
3354 pVCpu->cpum.GstCtx.rip = uNewEip;
3355 else
3356 return iemRaiseGeneralProtectionFault0(pVCpu);
3357#ifndef IEM_WITH_CODE_TLB
3358 iemOpcodeFlushLight(pVCpu, cbInstr);
3359#else
3360 RT_NOREF_PV(cbInstr);
3361#endif
3362 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3363}
3364
3365
3366/**
3367 * Performs a near jump to the specified address.
3368 *
3369 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
3370 * segment limit.
3371 *
3372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3373 * @param uNewRip The new RIP value.
3374 * @param cbInstr The instruction length, for flushing in the non-TLB case.
3375 */
3376DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishClearingRF(PVMCPUCC pVCpu, uint64_t uNewRip, uint8_t cbInstr) RT_NOEXCEPT
3377{
3378 Assert(IEM_IS_64BIT_CODE(pVCpu));
3379 if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
3380 pVCpu->cpum.GstCtx.rip = uNewRip;
3381 else
3382 return iemRaiseGeneralProtectionFault0(pVCpu);
3383#ifndef IEM_WITH_CODE_TLB
3384 iemOpcodeFlushLight(pVCpu, cbInstr);
3385#else
3386 RT_NOREF_PV(cbInstr);
3387#endif
3388 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3389}
3390
3391
3392/**
3393 * Implements a 16-bit relative call, no checking or clearing of
3394 * flags.
3395 *
3396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3397 * @param cbInstr The instruction length.
3398 * @param offDisp The 16-bit displacement.
3399 */
3400DECL_FORCE_INLINE(VBOXSTRICTRC)
3401iemRegRipRelativeCallS16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offDisp) RT_NOEXCEPT
3402{
3403 uint16_t const uOldIp = pVCpu->cpum.GstCtx.ip + cbInstr;
3404 uint16_t const uNewIp = uOldIp + offDisp;
3405 if ( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
3406 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */)
3407 { /* likely */ }
3408 else
3409 return iemRaiseGeneralProtectionFault0(pVCpu);
3410
3411 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldIp);
3412 if (rcStrict == VINF_SUCCESS)
3413 { /* likely */ }
3414 else
3415 return rcStrict;
3416
3417 pVCpu->cpum.GstCtx.rip = uNewIp;
3418#ifndef IEM_WITH_CODE_TLB
3419 iemOpcodeFlushLight(pVCpu, cbInstr);
3420#endif
3421 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3422}
3423
3424
3425/**
3426 * Implements a 16-bit relative call.
3427 *
3428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3429 * @param cbInstr The instruction length.
3430 * @param offDisp The 16-bit displacement.
3431 */
3432DECL_FORCE_INLINE(VBOXSTRICTRC)
3433iemRegRipRelativeCallS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offDisp) RT_NOEXCEPT
3434{
3435 uint16_t const uOldIp = pVCpu->cpum.GstCtx.ip + cbInstr;
3436 uint16_t const uNewIp = uOldIp + offDisp;
3437 if ( uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
3438 || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */)
3439 { /* likely */ }
3440 else
3441 return iemRaiseGeneralProtectionFault0(pVCpu);
3442
3443 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldIp);
3444 if (rcStrict == VINF_SUCCESS)
3445 { /* likely */ }
3446 else
3447 return rcStrict;
3448
3449 pVCpu->cpum.GstCtx.rip = uNewIp;
3450#ifndef IEM_WITH_CODE_TLB
3451 iemOpcodeFlushLight(pVCpu, cbInstr);
3452#endif
3453 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3454}
3455
3456
3457/**
3458 * Implements a 32-bit relative call, no checking or clearing of flags.
3459 *
3460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3461 * @param cbInstr The instruction length.
3462 * @param offDisp The 32-bit displacement.
3463 */
3464DECL_FORCE_INLINE(VBOXSTRICTRC)
3465iemRegEip32RelativeCallS32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offDisp) RT_NOEXCEPT
3466{
3467 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
3468
3469 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3470 uint32_t const uNewRip = uOldRip + offDisp;
3471 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3472 { /* likely */ }
3473 else
3474 return iemRaiseGeneralProtectionFault0(pVCpu);
3475
3476 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3477 if (rcStrict == VINF_SUCCESS)
3478 { /* likely */ }
3479 else
3480 return rcStrict;
3481
3482 pVCpu->cpum.GstCtx.rip = uNewRip;
3483#ifndef IEM_WITH_CODE_TLB
3484 iemOpcodeFlushLight(pVCpu, cbInstr);
3485#endif
3486 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3487}
3488
3489
3490/**
3491 * Implements a 32-bit relative call.
3492 *
3493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3494 * @param cbInstr The instruction length.
3495 * @param offDisp The 32-bit displacement.
3496 */
3497DECL_FORCE_INLINE(VBOXSTRICTRC)
3498iemRegEip32RelativeCallS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offDisp) RT_NOEXCEPT
3499{
3500 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));
3501
3502 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3503 uint32_t const uNewRip = uOldRip + offDisp;
3504 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3505 { /* likely */ }
3506 else
3507 return iemRaiseGeneralProtectionFault0(pVCpu);
3508
3509 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3510 if (rcStrict == VINF_SUCCESS)
3511 { /* likely */ }
3512 else
3513 return rcStrict;
3514
3515 pVCpu->cpum.GstCtx.rip = uNewRip;
3516#ifndef IEM_WITH_CODE_TLB
3517 iemOpcodeFlushLight(pVCpu, cbInstr);
3518#endif
3519 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3520}
3521
3522
3523/**
3524 * Implements a 64-bit relative call, no checking or clearing of flags.
3525 *
3526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3527 * @param cbInstr The instruction length.
3528 * @param offDisp The 64-bit displacement.
3529 */
3530DECL_FORCE_INLINE(VBOXSTRICTRC)
3531iemRegRip64RelativeCallS64AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, int64_t offDisp) RT_NOEXCEPT
3532{
3533 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3534 uint64_t const uNewRip = uOldRip + (int64_t)offDisp;
3535 if (IEM_IS_CANONICAL(uNewRip))
3536 { /* likely */ }
3537 else
3538 return iemRaiseNotCanonical(pVCpu);
3539
3540 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3541 if (rcStrict == VINF_SUCCESS)
3542 { /* likely */ }
3543 else
3544 return rcStrict;
3545
3546 pVCpu->cpum.GstCtx.rip = uNewRip;
3547#ifndef IEM_WITH_CODE_TLB
3548 iemOpcodeFlushLight(pVCpu, cbInstr);
3549#endif
3550 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3551}
3552
3553
3554/**
3555 * Implements a 64-bit relative call.
3556 *
3557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3558 * @param cbInstr The instruction length.
3559 * @param offDisp The 64-bit displacement.
3560 */
3561DECL_FORCE_INLINE(VBOXSTRICTRC)
3562iemRegRip64RelativeCallS64AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int64_t offDisp) RT_NOEXCEPT
3563{
3564 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3565 uint64_t const uNewRip = uOldRip + (int64_t)offDisp;
3566 if (IEM_IS_CANONICAL(uNewRip))
3567 { /* likely */ }
3568 else
3569 return iemRaiseNotCanonical(pVCpu);
3570
3571 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3572 if (rcStrict == VINF_SUCCESS)
3573 { /* likely */ }
3574 else
3575 return rcStrict;
3576
3577 pVCpu->cpum.GstCtx.rip = uNewRip;
3578#ifndef IEM_WITH_CODE_TLB
3579 iemOpcodeFlushLight(pVCpu, cbInstr);
3580#endif
3581 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3582}
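
/*
 * Usage sketch (not compiled): CALL rel32 in 64-bit mode sign-extends the
 * 32-bit displacement and goes through the helper above.  The function name is
 * made up; the real decoder drives this via the IEM_MC_* microcode macros.
 */
#if 0
static VBOXSTRICTRC iemSketchCallRel32In64BitMode(PVMCPUCC pVCpu, int32_t offDisp32)
{
    /* E8 rel32 without prefixes is 5 bytes long; the displacement is sign-extended to 64 bits. */
    return iemRegRip64RelativeCallS64AndFinishClearingRF(pVCpu, 5 /*cbInstr*/, (int64_t)offDisp32);
}
#endif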
3583
3584
3585/**
3586 * Implements a 16-bit indirect call, no checking or clearing of
3587 * flags.
3588 *
3589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3590 * @param cbInstr The instruction length.
3591 * @param uNewRip The new RIP value.
3592 */
3593DECL_FORCE_INLINE(VBOXSTRICTRC)
3594iemRegIp16IndirectCallU16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3595{
3596 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3597 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3598 { /* likely */ }
3599 else
3600 return iemRaiseGeneralProtectionFault0(pVCpu);
3601
3602 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3603 if (rcStrict == VINF_SUCCESS)
3604 { /* likely */ }
3605 else
3606 return rcStrict;
3607
3608 pVCpu->cpum.GstCtx.rip = uNewRip;
3609#ifndef IEM_WITH_CODE_TLB
3610 iemOpcodeFlushLight(pVCpu, cbInstr);
3611#endif
3612 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3613}
3614
3615
3616/**
3617 * Implements a 16-bit indirect call, no checking or clearing of
3618 * flags.
3619 *
3620 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3621 * @param cbInstr The instruction length.
3622 * @param uNewRip The new RIP value.
3623 */
3624DECL_FORCE_INLINE(VBOXSTRICTRC)
3625iemRegEip32IndirectCallU16AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3626{
3627 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3628 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3629 { /* likely */ }
3630 else
3631 return iemRaiseGeneralProtectionFault0(pVCpu);
3632
3633 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3634 if (rcStrict == VINF_SUCCESS)
3635 { /* likely */ }
3636 else
3637 return rcStrict;
3638
3639 pVCpu->cpum.GstCtx.rip = uNewRip;
3640#ifndef IEM_WITH_CODE_TLB
3641 iemOpcodeFlushLight(pVCpu, cbInstr);
3642#endif
3643 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3644}
3645
3646
3647/**
3648 * Implements a 16-bit indirect call.
3649 *
3650 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3651 * @param cbInstr The instruction length.
3652 * @param uNewRip The new RIP value.
3653 */
3654DECL_FORCE_INLINE(VBOXSTRICTRC)
3655iemRegIp16IndirectCallU16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3656{
3657 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3658 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3659 { /* likely */ }
3660 else
3661 return iemRaiseGeneralProtectionFault0(pVCpu);
3662
3663 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3664 if (rcStrict == VINF_SUCCESS)
3665 { /* likely */ }
3666 else
3667 return rcStrict;
3668
3669 pVCpu->cpum.GstCtx.rip = uNewRip;
3670#ifndef IEM_WITH_CODE_TLB
3671 iemOpcodeFlushLight(pVCpu, cbInstr);
3672#endif
3673 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3674}
3675
3676
3677/**
3678 * Implements a 16-bit indirect call.
3679 *
3680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3681 * @param cbInstr The instruction length.
3682 * @param uNewRip The new RIP value.
3683 */
3684DECL_FORCE_INLINE(VBOXSTRICTRC)
3685iemRegEip32IndirectCallU16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uNewRip) RT_NOEXCEPT
3686{
3687 uint16_t const uOldRip = pVCpu->cpum.GstCtx.ip + cbInstr;
3688 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3689 { /* likely */ }
3690 else
3691 return iemRaiseGeneralProtectionFault0(pVCpu);
3692
3693 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldRip);
3694 if (rcStrict == VINF_SUCCESS)
3695 { /* likely */ }
3696 else
3697 return rcStrict;
3698
3699 pVCpu->cpum.GstCtx.rip = uNewRip;
3700#ifndef IEM_WITH_CODE_TLB
3701 iemOpcodeFlushLight(pVCpu, cbInstr);
3702#endif
3703 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3704}
3705
3706
3707/**
3708 * Implements a 32-bit indirect call, no checking or clearing of
3709 * flags.
3710 *
3711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3712 * @param cbInstr The instruction length.
3713 * @param uNewRip The new RIP value.
3714 */
3715DECL_FORCE_INLINE(VBOXSTRICTRC)
3716iemRegEip32IndirectCallU32AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t uNewRip) RT_NOEXCEPT
3717{
3718 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3719 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3720 { /* likely */ }
3721 else
3722 return iemRaiseGeneralProtectionFault0(pVCpu);
3723
3724 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3725 if (rcStrict == VINF_SUCCESS)
3726 { /* likely */ }
3727 else
3728 return rcStrict;
3729
3730 pVCpu->cpum.GstCtx.rip = uNewRip;
3731#ifndef IEM_WITH_CODE_TLB
3732 iemOpcodeFlushLight(pVCpu, cbInstr);
3733#endif
3734 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3735}
3736
3737
3738/**
3739 * Implements a 32-bit indirect call.
3740 *
3741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3742 * @param cbInstr The instruction length.
3743 * @param uNewRip The new RIP value.
3744 */
3745DECL_FORCE_INLINE(VBOXSTRICTRC)
3746iemRegEip32IndirectCallU32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint32_t uNewRip) RT_NOEXCEPT
3747{
3748 uint32_t const uOldRip = pVCpu->cpum.GstCtx.eip + cbInstr;
3749 if (uNewRip <= pVCpu->cpum.GstCtx.cs.u32Limit)
3750 { /* likely */ }
3751 else
3752 return iemRaiseGeneralProtectionFault0(pVCpu);
3753
3754 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldRip);
3755 if (rcStrict == VINF_SUCCESS)
3756 { /* likely */ }
3757 else
3758 return rcStrict;
3759
3760 pVCpu->cpum.GstCtx.rip = uNewRip;
3761#ifndef IEM_WITH_CODE_TLB
3762 iemOpcodeFlushLight(pVCpu, cbInstr);
3763#endif
3764 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3765}
3766
3767
3768/**
3769 * Implements a 64-bit indirect call, no checking or clearing of
3770 * flags.
3771 *
3772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3773 * @param cbInstr The instruction length.
3774 * @param uNewRip The new RIP value.
3775 */
3776DECL_FORCE_INLINE(VBOXSTRICTRC)
3777iemRegRip64IndirectCallU64AndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t uNewRip) RT_NOEXCEPT
3778{
3779 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3780 if (IEM_IS_CANONICAL(uNewRip))
3781 { /* likely */ }
3782 else
3783 return iemRaiseGeneralProtectionFault0(pVCpu);
3784
3785 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3786 if (rcStrict == VINF_SUCCESS)
3787 { /* likely */ }
3788 else
3789 return rcStrict;
3790
3791 pVCpu->cpum.GstCtx.rip = uNewRip;
3792#ifndef IEM_WITH_CODE_TLB
3793 iemOpcodeFlushLight(pVCpu, cbInstr);
3794#endif
3795 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
3796}
3797
3798
3799/**
3800 * Implements a 64-bit indirect call.
3801 *
3802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3803 * @param cbInstr The instruction length.
3804 * @param uNewRip The new RIP value.
3805 */
3806DECL_FORCE_INLINE(VBOXSTRICTRC)
3807iemRegRip64IndirectCallU64AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint64_t uNewRip) RT_NOEXCEPT
3808{
3809 uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip + cbInstr;
3810 if (IEM_IS_CANONICAL(uNewRip))
3811 { /* likely */ }
3812 else
3813 return iemRaiseGeneralProtectionFault0(pVCpu);
3814
3815 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldRip);
3816 if (rcStrict == VINF_SUCCESS)
3817 { /* likely */ }
3818 else
3819 return rcStrict;
3820
3821 pVCpu->cpum.GstCtx.rip = uNewRip;
3822#ifndef IEM_WITH_CODE_TLB
3823 iemOpcodeFlushLight(pVCpu, cbInstr);
3824#endif
3825 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
3826}
3827
3828
3829
3830/**
3831 * Adds to the stack pointer.
3832 *
3833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3834 * @param cbToAdd The number of bytes to add (8-bit!).
3835 */
3836DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd) RT_NOEXCEPT
3837{
3838 if (IEM_IS_64BIT_CODE(pVCpu))
3839 pVCpu->cpum.GstCtx.rsp += cbToAdd;
3840 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3841 pVCpu->cpum.GstCtx.esp += cbToAdd;
3842 else
3843 pVCpu->cpum.GstCtx.sp += cbToAdd;
3844}
3845
3846
3847/**
3848 * Subtracts from the stack pointer.
3849 *
3850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3851 * @param cbToSub The number of bytes to subtract (8-bit!).
3852 */
3853DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub) RT_NOEXCEPT
3854{
3855 if (IEM_IS_64BIT_CODE(pVCpu))
3856 pVCpu->cpum.GstCtx.rsp -= cbToSub;
3857 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3858 pVCpu->cpum.GstCtx.esp -= cbToSub;
3859 else
3860 pVCpu->cpum.GstCtx.sp -= cbToSub;
3861}
3862
3863
3864/**
3865 * Adds to the temporary stack pointer.
3866 *
3867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3868 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3869 * @param cbToAdd The number of bytes to add (16-bit).
3870 */
3871DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd) RT_NOEXCEPT
3872{
3873 if (IEM_IS_64BIT_CODE(pVCpu))
3874 pTmpRsp->u += cbToAdd;
3875 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3876 pTmpRsp->DWords.dw0 += cbToAdd;
3877 else
3878 pTmpRsp->Words.w0 += cbToAdd;
3879}
3880
3881
3882/**
3883 * Subtracts from the temporary stack pointer.
3884 *
3885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3886 * @param pTmpRsp The temporary SP/ESP/RSP to update.
3887 * @param cbToSub The number of bytes to subtract.
3888 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
3889 * expecting that.
3890 */
3891DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub) RT_NOEXCEPT
3892{
3893 if (IEM_IS_64BIT_CODE(pVCpu))
3894 pTmpRsp->u -= cbToSub;
3895 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3896 pTmpRsp->DWords.dw0 -= cbToSub;
3897 else
3898 pTmpRsp->Words.w0 -= cbToSub;
3899}
3900
3901
3902/**
3903 * Calculates the effective stack address for a push of the specified size as
3904 * well as the new RSP value (upper bits may be masked).
3905 *
3906 * @returns Effective stack address for the push.
3907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3908 * @param cbItem    The size of the stack item to push.
3909 * @param puNewRsp Where to return the new RSP value.
3910 */
3911DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
3912{
3913 RTUINT64U uTmpRsp;
3914 RTGCPTR GCPtrTop;
3915 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
3916
3917 if (IEM_IS_64BIT_CODE(pVCpu))
3918 GCPtrTop = uTmpRsp.u -= cbItem;
3919 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3920 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
3921 else
3922 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
3923 *puNewRsp = uTmpRsp.u;
3924 return GCPtrTop;
3925}
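
/*
 * Worked example (comment only): with a 16-bit stack (SS.ATTR.B=0, not 64-bit
 * code) and RSP=0x00010000FFFE, iemRegGetRspForPush(pVCpu, 2, &uNewRsp) only
 * decrements the low word, returning GCPtrTop=0xFFFC and setting
 * uNewRsp=0x00010000FFFC -- this is what "upper bits may be masked" refers to.
 * The SS base is not applied here; callers add it when forming the linear
 * address.
 */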
3926
3927
3928/**
3929 * Gets the current stack pointer and calculates the value after a pop of the
3930 * specified size.
3931 *
3932 * @returns Current stack pointer.
3933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3934 * @param cbItem The size of the stack item to pop.
3935 * @param puNewRsp Where to return the new RSP value.
3936 */
3937DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp) RT_NOEXCEPT
3938{
3939 RTUINT64U uTmpRsp;
3940 RTGCPTR GCPtrTop;
3941 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
3942
3943 if (IEM_IS_64BIT_CODE(pVCpu))
3944 {
3945 GCPtrTop = uTmpRsp.u;
3946 uTmpRsp.u += cbItem;
3947 }
3948 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3949 {
3950 GCPtrTop = uTmpRsp.DWords.dw0;
3951 uTmpRsp.DWords.dw0 += cbItem;
3952 }
3953 else
3954 {
3955 GCPtrTop = uTmpRsp.Words.w0;
3956 uTmpRsp.Words.w0 += cbItem;
3957 }
3958 *puNewRsp = uTmpRsp.u;
3959 return GCPtrTop;
3960}
3961
3962
3963/**
3964 * Calculates the effective stack address for a push of the specified size as
3965 * well as the new temporary RSP value (upper bits may be masked).
3966 *
3967 * @returns Effective stack address for the push.
3968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3969 * @param pTmpRsp The temporary stack pointer. This is updated.
3970 * @param cbItem    The size of the stack item to push.
3971 */
3972DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
3973{
3974 RTGCPTR GCPtrTop;
3975
3976 if (IEM_IS_64BIT_CODE(pVCpu))
3977 GCPtrTop = pTmpRsp->u -= cbItem;
3978 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3979 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
3980 else
3981 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
3982 return GCPtrTop;
3983}
3984
3985
3986/**
3987 * Gets the effective stack address for a pop of the specified size and
3988 * calculates and updates the temporary RSP.
3989 *
3990 * @returns Current stack pointer.
3991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3992 * @param pTmpRsp The temporary stack pointer. This is updated.
3993 * @param cbItem The size of the stack item to pop.
3994 */
3995DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem) RT_NOEXCEPT
3996{
3997 RTGCPTR GCPtrTop;
3998 if (IEM_IS_64BIT_CODE(pVCpu))
3999 {
4000 GCPtrTop = pTmpRsp->u;
4001 pTmpRsp->u += cbItem;
4002 }
4003 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
4004 {
4005 GCPtrTop = pTmpRsp->DWords.dw0;
4006 pTmpRsp->DWords.dw0 += cbItem;
4007 }
4008 else
4009 {
4010 GCPtrTop = pTmpRsp->Words.w0;
4011 pTmpRsp->Words.w0 += cbItem;
4012 }
4013 return GCPtrTop;
4014}
4015
4016
4017/** Common body for iemRegRipNearReturnAndFinishClearingRF()
4018 * and iemRegRipNearReturnAndFinishNoFlags(). */
4019template<bool a_fWithFlags>
4020DECL_FORCE_INLINE(VBOXSTRICTRC)
4021iemRegRipNearReturnCommon(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
4022{
4023 /* Fetch the new RIP from the stack. */
4024 VBOXSTRICTRC rcStrict;
4025 RTUINT64U NewRip;
4026 RTUINT64U NewRsp;
4027 NewRsp.u = pVCpu->cpum.GstCtx.rsp;
4028 switch (enmEffOpSize)
4029 {
4030 case IEMMODE_16BIT:
4031 NewRip.u = 0;
4032 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);
4033 break;
4034 case IEMMODE_32BIT:
4035 NewRip.u = 0;
4036 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);
4037 break;
4038 case IEMMODE_64BIT:
4039 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);
4040 break;
4041 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4042 }
4043 if (rcStrict != VINF_SUCCESS)
4044 return rcStrict;
4045
4046    /* Check the new RIP before loading it. */
4047 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
4048 * of it. The canonical test is performed here and for call. */
4049 if (enmEffOpSize != IEMMODE_64BIT)
4050 {
4051 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))
4052 { /* likely */ }
4053 else
4054 {
4055 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));
4056 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
4057 }
4058 }
4059 else
4060 {
4061 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))
4062 { /* likely */ }
4063 else
4064 {
4065 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
4066 return iemRaiseNotCanonical(pVCpu);
4067 }
4068 }
4069
4070 /* Apply cbPop */
4071 if (cbPop)
4072 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);
4073
4074 /* Commit it. */
4075 pVCpu->cpum.GstCtx.rip = NewRip.u;
4076 pVCpu->cpum.GstCtx.rsp = NewRsp.u;
4077
4078 /* Flush the prefetch buffer. */
4079#ifndef IEM_WITH_CODE_TLB
4080 iemOpcodeFlushLight(pVCpu, cbInstr);
4081#endif
4082 RT_NOREF(cbInstr);
4083
4084
4085 if (a_fWithFlags)
4086 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
4087 return iemRegFinishNoFlags(pVCpu, VINF_SUCCESS);
4088}
4089
4090
4091/**
4092 * Implements retn and retn imm16.
4093 *
4094 * @param pVCpu The cross context virtual CPU structure of the
4095 * calling thread.
4096 * @param cbInstr The current instruction length.
4097 * @param enmEffOpSize The effective operand size. This is constant.
4098 * @param cbPop         The number of argument bytes to pop from the stack.
4099 *                      This can be constant (zero).
4100 */
4101DECL_FORCE_INLINE(VBOXSTRICTRC)
4102iemRegRipNearReturnAndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
4103{
4104 return iemRegRipNearReturnCommon<true /*a_fWithFlags*/>(pVCpu, cbInstr, cbPop, enmEffOpSize);
4105}
4106
4107
4108/**
4109 * Implements retn and retn imm16, no checking or clearing of
4110 * flags.
4111 *
4112 * @param pVCpu The cross context virtual CPU structure of the
4113 * calling thread.
4114 * @param cbInstr The current instruction length.
4115 * @param enmEffOpSize The effective operand size. This is constant.
4116 * @param cbPop         The number of argument bytes to pop from the stack.
4117 *                      This can be constant (zero).
4118 */
4119DECL_FORCE_INLINE(VBOXSTRICTRC)
4120iemRegRipNearReturnAndFinishNoFlags(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t cbPop, IEMMODE enmEffOpSize) RT_NOEXCEPT
4121{
4122 return iemRegRipNearReturnCommon<false /*a_fWithFlags*/>(pVCpu, cbInstr, cbPop, enmEffOpSize);
4123}
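
/*
 * Usage sketch (not compiled): how 'retn imm16' with a 32-bit operand size
 * could be funnelled through the wrapper above.  The function name is made up
 * for illustration.
 */
#if 0
static VBOXSTRICTRC iemSketchRetnImm16(PVMCPUCC pVCpu, uint16_t uImm16)
{
    /* C2 iw is 3 bytes long; with a 32-bit operand size a 32-bit EIP is popped
       and uImm16 argument bytes are discarded afterwards. */
    return iemRegRipNearReturnAndFinishClearingRF(pVCpu, 3 /*cbInstr*/, uImm16, IEMMODE_32BIT);
}
#endif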
4124
4125/** @} */
4126
4127
4128/** @name FPU access and helpers.
4129 *
4130 * @{
4131 */
4132
4133
4134/**
4135 * Hook for preparing to use the host FPU.
4136 *
4137 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4138 *
4139 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4140 */
4141DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu) RT_NOEXCEPT
4142{
4143#ifdef IN_RING3
4144 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
4145#else
4146 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
4147#endif
4148 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4149}
4150
4151
4152/**
4153 * Hook for preparing to use the host FPU for SSE.
4154 *
4155 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4156 *
4157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4158 */
4159DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu) RT_NOEXCEPT
4160{
4161 iemFpuPrepareUsage(pVCpu);
4162}
4163
4164
4165/**
4166 * Hook for preparing to use the host FPU for AVX.
4167 *
4168 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4169 *
4170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4171 */
4172DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu) RT_NOEXCEPT
4173{
4174 iemFpuPrepareUsage(pVCpu);
4175}
4176
4177
4178/**
4179 * Hook for actualizing the guest FPU state before the interpreter reads it.
4180 *
4181 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4182 *
4183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4184 */
4185DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
4186{
4187#ifdef IN_RING3
4188 NOREF(pVCpu);
4189#else
4190 CPUMRZFpuStateActualizeForRead(pVCpu);
4191#endif
4192 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4193}
4194
4195
4196/**
4197 * Hook for actualizing the guest FPU state before the interpreter changes it.
4198 *
4199 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4200 *
4201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4202 */
4203DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
4204{
4205#ifdef IN_RING3
4206 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
4207#else
4208 CPUMRZFpuStateActualizeForChange(pVCpu);
4209#endif
4210 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4211}
4212
4213
4214/**
4215 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
4216 * only.
4217 *
4218 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4219 *
4220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4221 */
4222DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
4223{
4224#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
4225 NOREF(pVCpu);
4226#else
4227 CPUMRZFpuStateActualizeSseForRead(pVCpu);
4228#endif
4229 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4230}
4231
4232
4233/**
4234 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
4235 * read+write.
4236 *
4237 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4238 *
4239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4240 */
4241DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
4242{
4243#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
4244 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
4245#else
4246 CPUMRZFpuStateActualizeForChange(pVCpu);
4247#endif
4248 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4249
4250 /* Make sure any changes are loaded the next time around. */
4251 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
4252}
4253
4254
4255/**
4256 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
4257 * only.
4258 *
4259 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4260 *
4261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4262 */
4263DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu) RT_NOEXCEPT
4264{
4265#ifdef IN_RING3
4266 NOREF(pVCpu);
4267#else
4268 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
4269#endif
4270 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4271}
4272
4273
4274/**
4275 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
4276 * read+write.
4277 *
4278 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
4279 *
4280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4281 */
4282DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu) RT_NOEXCEPT
4283{
4284#ifdef IN_RING3
4285 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
4286#else
4287 CPUMRZFpuStateActualizeForChange(pVCpu);
4288#endif
4289 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
4290
4291 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
4292 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
4293}
4294
4295
4296/**
4297 * Stores a QNaN value into a FPU register.
4298 *
4299 * @param pReg Pointer to the register.
4300 */
4301DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg) RT_NOEXCEPT
4302{
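    /* This writes the x87 'real indefinite' QNaN: sign=1, exponent=0x7fff,
       mantissa=0xC000000000000000 (i.e. 0xFFFF'C0000000'00000000). */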
4303 pReg->au32[0] = UINT32_C(0x00000000);
4304 pReg->au32[1] = UINT32_C(0xc0000000);
4305 pReg->au16[4] = UINT16_C(0xffff);
4306}
4307
4308
4309/**
4310 * Updates the FOP, FPU.CS and FPUIP registers, extended version.
4311 *
4312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4313 * @param pFpuCtx The FPU context.
4314 * @param uFpuOpcode The FPU opcode value (see IEMCPU::uFpuOpcode).
4315 */
4316DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorkerEx(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint16_t uFpuOpcode) RT_NOEXCEPT
4317{
4318 Assert(uFpuOpcode != UINT16_MAX);
4319 pFpuCtx->FOP = uFpuOpcode;
4320    /** @todo x87.CS and FPUIP need to be kept separately. */
4321 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4322 {
4323 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
4324 * happens in real mode here based on the fnsave and fnstenv images. */
4325 pFpuCtx->CS = 0;
4326 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
4327 }
4328 else if (!IEM_IS_LONG_MODE(pVCpu))
4329 {
4330 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
4331 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
4332 }
4333 else
4334 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
4335}
4336
4337
4338/**
4339 * Marks the specified stack register as free (for FFREE).
4340 *
4341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4342 * @param iStReg The register to free.
4343 */
4344DECLINLINE(void) iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4345{
4346 Assert(iStReg < 8);
4347 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4348 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4349 pFpuCtx->FTW &= ~RT_BIT(iReg);
4350}
4351
4352
4353/**
4354 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
4355 *
4356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4357 */
4358DECLINLINE(void) iemFpuStackIncTop(PVMCPUCC pVCpu) RT_NOEXCEPT
4359{
4360 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4361 uint16_t uFsw = pFpuCtx->FSW;
4362 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
4363 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4364 uFsw &= ~X86_FSW_TOP_MASK;
4365 uFsw |= uTop;
4366 pFpuCtx->FSW = uFsw;
4367}
4368
4369
4370/**
4371 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
4372 *
4373 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4374 */
4375DECLINLINE(void) iemFpuStackDecTop(PVMCPUCC pVCpu) RT_NOEXCEPT
4376{
4377 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4378 uint16_t uFsw = pFpuCtx->FSW;
4379 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
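    /* Adding 7 modulo 8 is the same as subtracting 1, i.e. TOP moves down one register (a push). */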
4380 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
4381 uFsw &= ~X86_FSW_TOP_MASK;
4382 uFsw |= uTop;
4383 pFpuCtx->FSW = uFsw;
4384}
4385
4386
4387
4388
4389DECLINLINE(int) iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4390{
4391 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4392 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4393 if (pFpuCtx->FTW & RT_BIT(iReg))
4394 return VINF_SUCCESS;
4395 return VERR_NOT_FOUND;
4396}
4397
4398
4399DECLINLINE(int) iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef) RT_NOEXCEPT
4400{
4401 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4402 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
4403 if (pFpuCtx->FTW & RT_BIT(iReg))
4404 {
4405 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
4406 return VINF_SUCCESS;
4407 }
4408 return VERR_NOT_FOUND;
4409}
4410
4411
4412DECLINLINE(int) iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
4413 uint8_t iStReg1, PCRTFLOAT80U *ppRef1) RT_NOEXCEPT
4414{
4415 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4416 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4417 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4418 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4419 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4420 {
4421 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
4422 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
4423 return VINF_SUCCESS;
4424 }
4425 return VERR_NOT_FOUND;
4426}
4427
4428
4429DECLINLINE(int) iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1) RT_NOEXCEPT
4430{
4431 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4432 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4433 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
4434 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
4435 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
4436 {
4437 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
4438 return VINF_SUCCESS;
4439 }
4440 return VERR_NOT_FOUND;
4441}
4442
4443
4444/**
4445 * Rotates the stack registers when setting new TOS.
4446 *
4447 * @param pFpuCtx The FPU context.
4448 * @param iNewTop New TOS value.
4449 * @remarks We only do this to speed up fxsave/fxrstor which
4450 * arrange the FP registers in stack order.
4451 * MUST be done before writing the new TOS (FSW).
4452 */
4453DECLINLINE(void) iemFpuRotateStackSetTop(PX86FXSTATE pFpuCtx, uint16_t iNewTop) RT_NOEXCEPT
4454{
4455 uint16_t iOldTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4456 RTFLOAT80U ar80Temp[8];
4457
4458 if (iOldTop == iNewTop)
4459 return;
4460
4461 /* Unscrew the stack and get it into 'native' order. */
4462 ar80Temp[0] = pFpuCtx->aRegs[(8 - iOldTop + 0) & X86_FSW_TOP_SMASK].r80;
4463 ar80Temp[1] = pFpuCtx->aRegs[(8 - iOldTop + 1) & X86_FSW_TOP_SMASK].r80;
4464 ar80Temp[2] = pFpuCtx->aRegs[(8 - iOldTop + 2) & X86_FSW_TOP_SMASK].r80;
4465 ar80Temp[3] = pFpuCtx->aRegs[(8 - iOldTop + 3) & X86_FSW_TOP_SMASK].r80;
4466 ar80Temp[4] = pFpuCtx->aRegs[(8 - iOldTop + 4) & X86_FSW_TOP_SMASK].r80;
4467 ar80Temp[5] = pFpuCtx->aRegs[(8 - iOldTop + 5) & X86_FSW_TOP_SMASK].r80;
4468 ar80Temp[6] = pFpuCtx->aRegs[(8 - iOldTop + 6) & X86_FSW_TOP_SMASK].r80;
4469 ar80Temp[7] = pFpuCtx->aRegs[(8 - iOldTop + 7) & X86_FSW_TOP_SMASK].r80;
4470
4471 /* Now rotate the stack to the new position. */
4472 pFpuCtx->aRegs[0].r80 = ar80Temp[(iNewTop + 0) & X86_FSW_TOP_SMASK];
4473 pFpuCtx->aRegs[1].r80 = ar80Temp[(iNewTop + 1) & X86_FSW_TOP_SMASK];
4474 pFpuCtx->aRegs[2].r80 = ar80Temp[(iNewTop + 2) & X86_FSW_TOP_SMASK];
4475 pFpuCtx->aRegs[3].r80 = ar80Temp[(iNewTop + 3) & X86_FSW_TOP_SMASK];
4476 pFpuCtx->aRegs[4].r80 = ar80Temp[(iNewTop + 4) & X86_FSW_TOP_SMASK];
4477 pFpuCtx->aRegs[5].r80 = ar80Temp[(iNewTop + 5) & X86_FSW_TOP_SMASK];
4478 pFpuCtx->aRegs[6].r80 = ar80Temp[(iNewTop + 6) & X86_FSW_TOP_SMASK];
4479 pFpuCtx->aRegs[7].r80 = ar80Temp[(iNewTop + 7) & X86_FSW_TOP_SMASK];
4480}
4481
4482
4483/**
4484 * Updates the FPU exception status after FCW is changed.
4485 *
4486 * @param pFpuCtx The FPU context.
4487 */
4488DECLINLINE(void) iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4489{
4490 uint16_t u16Fsw = pFpuCtx->FSW;
4491 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
4492 u16Fsw |= X86_FSW_ES | X86_FSW_B;
4493 else
4494 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
4495 pFpuCtx->FSW = u16Fsw;
4496}
4497
4498
4499/**
4500 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
4501 *
4502 * @returns The full FTW.
4503 * @param pFpuCtx The FPU context.
4504 */
4505DECLINLINE(uint16_t) iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx) RT_NOEXCEPT
4506{
4507 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
4508 uint16_t u16Ftw = 0;
4509 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
4510 for (unsigned iSt = 0; iSt < 8; iSt++)
4511 {
4512 unsigned const iReg = (iSt + iTop) & 7;
4513 if (!(u8Ftw & RT_BIT(iReg)))
4514 u16Ftw |= 3 << (iReg * 2); /* empty */
4515 else
4516 {
4517 uint16_t uTag;
4518 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
4519 if (pr80Reg->s.uExponent == 0x7fff)
4520 uTag = 2; /* Exponent is all 1's => Special. */
4521 else if (pr80Reg->s.uExponent == 0x0000)
4522 {
4523 if (pr80Reg->s.uMantissa == 0x0000)
4524 uTag = 1; /* All bits are zero => Zero. */
4525 else
4526 uTag = 2; /* Must be special. */
4527 }
4528 else if (pr80Reg->s.uMantissa & RT_BIT_64(63)) /* The J bit. */
4529 uTag = 0; /* Valid. */
4530 else
4531 uTag = 2; /* Must be special. */
4532
4533 u16Ftw |= uTag << (iReg * 2);
4534 }
4535 }
4536
4537 return u16Ftw;
4538}
4539
4540
4541/**
4542 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
4543 *
4544 * @returns The compressed FTW.
4545 * @param u16FullFtw The full FTW to convert.
4546 */
4547DECLINLINE(uint16_t) iemFpuCompressFtw(uint16_t u16FullFtw) RT_NOEXCEPT
4548{
4549 uint8_t u8Ftw = 0;
4550 for (unsigned i = 0; i < 8; i++)
4551 {
4552 if ((u16FullFtw & 3) != 3 /*empty*/)
4553 u8Ftw |= RT_BIT(i);
4554 u16FullFtw >>= 2;
4555 }
4556
4557 return u8Ftw;
4558}
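
/*
 * Worked examples (comment only): an all-empty stack has a full FTW of 0xFFFF
 * (tag 3 in every field), which iemFpuCompressFtw() turns into 0x00.  With
 * only physical register 0 holding a normal value, the full FTW is 0xFFFC
 * (tag 0 for reg 0, tag 3 for the rest) and the compressed FTW is 0x01.
 */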
4559
4560/** @} */
4561
4562
4563/** @name Memory access.
4564 *
4565 * @{
4566 */
4567
4568
4569/**
4570 * Checks whether alignment checks are enabled or not.
4571 *
4572 * @returns true if enabled, false if not.
4573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4574 */
4575DECLINLINE(bool) iemMemAreAlignmentChecksEnabled(PVMCPUCC pVCpu) RT_NOEXCEPT
4576{
4577#if 0
4578 AssertCompile(X86_CR0_AM == X86_EFL_AC);
4579 return IEM_GET_CPL(pVCpu) == 3
4580 && (((uint32_t)pVCpu->cpum.GstCtx.cr0 & pVCpu->cpum.GstCtx.eflags.u) & X86_CR0_AM);
4581#else
4582 return RT_BOOL(pVCpu->iem.s.fExec & IEM_F_X86_AC);
4583#endif
4584}
4585
4586/**
4587 * Checks if the given segment can be written to, raise the appropriate
4588 * exception if not.
4589 *
4590 * @returns VBox strict status code.
4591 *
4592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4593 * @param pHid Pointer to the hidden register.
4594 * @param iSegReg The register number.
4595 * @param pu64BaseAddr Where to return the base address to use for the
4596 * segment. (In 64-bit code it may differ from the
4597 * base in the hidden segment.)
4598 */
4599DECLINLINE(VBOXSTRICTRC) iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
4600 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
4601{
4602 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4603
4604 if (IEM_IS_64BIT_CODE(pVCpu))
4605 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4606 else
4607 {
4608 if (!pHid->Attr.n.u1Present)
4609 {
4610 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
4611 AssertRelease(uSel == 0);
4612 LogEx(LOG_GROUP_IEM,("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
4613 return iemRaiseGeneralProtectionFault0(pVCpu);
4614 }
4615
4616 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
4617 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
4618 && !IEM_IS_64BIT_CODE(pVCpu) )
4619 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4620 *pu64BaseAddr = pHid->u64Base;
4621 }
4622 return VINF_SUCCESS;
4623}
4624
4625
4626/**
4627 * Checks if the given segment can be read from, raise the appropriate
4628 * exception if not.
4629 *
4630 * @returns VBox strict status code.
4631 *
4632 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4633 * @param pHid Pointer to the hidden register.
4634 * @param iSegReg The register number.
4635 * @param pu64BaseAddr Where to return the base address to use for the
4636 * segment. (In 64-bit code it may differ from the
4637 * base in the hidden segment.)
4638 */
4639DECLINLINE(VBOXSTRICTRC) iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid,
4640 uint8_t iSegReg, uint64_t *pu64BaseAddr) RT_NOEXCEPT
4641{
4642 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4643
4644 if (IEM_IS_64BIT_CODE(pVCpu))
4645 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
4646 else
4647 {
4648 if (!pHid->Attr.n.u1Present)
4649 {
4650 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
4651 AssertRelease(uSel == 0);
4652 LogEx(LOG_GROUP_IEM,("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
4653 return iemRaiseGeneralProtectionFault0(pVCpu);
4654 }
4655
4656 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4657 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
4658 *pu64BaseAddr = pHid->u64Base;
4659 }
4660 return VINF_SUCCESS;
4661}
4662
4663
4664/**
4665 * Maps a physical page.
4666 *
4667 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
4668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4669 * @param GCPhysMem The physical address.
4670 * @param fAccess The intended access.
4671 * @param ppvMem Where to return the mapping address.
4672 * @param pLock The PGM lock.
4673 */
4674DECLINLINE(int) iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
4675 void **ppvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
4676{
4677#ifdef IEM_LOG_MEMORY_WRITES
4678 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4679 return VERR_PGM_PHYS_TLB_CATCH_ALL;
4680#endif
4681
4682    /** @todo This API may require some improvement later. A private deal with PGM
4683     *        regarding locking and unlocking needs to be struck. A couple of TLBs
4684 * living in PGM, but with publicly accessible inlined access methods
4685 * could perhaps be an even better solution. */
4686 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
4687 GCPhysMem,
4688 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
4689 RT_BOOL(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS),
4690 ppvMem,
4691 pLock);
4692 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
4693 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
4694
4695 return rc;
4696}
4697
4698
4699/**
4700 * Unmap a page previously mapped by iemMemPageMap.
4701 *
4702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4703 * @param GCPhysMem The physical address.
4704 * @param fAccess The intended access.
4705 * @param pvMem What iemMemPageMap returned.
4706 * @param pLock The PGM lock.
4707 */
4708DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess,
4709 const void *pvMem, PPGMPAGEMAPLOCK pLock) RT_NOEXCEPT
4710{
4711 NOREF(pVCpu);
4712 NOREF(GCPhysMem);
4713 NOREF(fAccess);
4714 NOREF(pvMem);
4715 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
4716}
4717
4718#ifdef IEM_WITH_SETJMP
4719
4720/** @todo slim this down */
4721DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg,
4722 size_t cbMem, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
4723{
4724 Assert(cbMem >= 1);
4725 Assert(iSegReg < X86_SREG_COUNT);
4726
4727 /*
4728 * 64-bit mode is simpler.
4729 */
4730 if (IEM_IS_64BIT_CODE(pVCpu))
4731 {
4732 if (iSegReg >= X86_SREG_FS && iSegReg != UINT8_MAX)
4733 {
4734 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4735 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
4736 GCPtrMem += pSel->u64Base;
4737 }
4738
4739 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
4740 return GCPtrMem;
4741 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
4742 }
4743 /*
4744 * 16-bit and 32-bit segmentation.
4745 */
4746 else if (iSegReg != UINT8_MAX)
4747 {
4748 /** @todo Does this apply to segments with 4G-1 limit? */
4749 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
4750 if (RT_LIKELY(GCPtrLast32 >= (uint32_t)GCPtrMem))
4751 {
4752 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4753 PCPUMSELREGHID const pSel = iemSRegGetHid(pVCpu, iSegReg);
4754 switch (pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
4755 | X86_SEL_TYPE_READ | X86_SEL_TYPE_WRITE /* same as read */
4756 | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_CONF /* same as down */
4757 | X86_SEL_TYPE_CODE))
4758 {
4759 case X86DESCATTR_P: /* readonly data, expand up */
4760 case X86DESCATTR_P | X86_SEL_TYPE_WRITE: /* writable data, expand up */
4761 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ: /* code, read-only */
4762 case X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ | X86_SEL_TYPE_CONF: /* conforming code, read-only */
4763 /* expand up */
4764 if (RT_LIKELY(GCPtrLast32 <= pSel->u32Limit))
4765 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4766 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x vs %#x\n",
4767 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit));
4768 break;
4769
4770 case X86DESCATTR_P | X86_SEL_TYPE_DOWN: /* readonly data, expand down */
4771 case X86DESCATTR_P | X86_SEL_TYPE_DOWN | X86_SEL_TYPE_WRITE: /* writable data, expand down */
4772 /* expand down */
4773 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
4774 && ( pSel->Attr.n.u1DefBig
4775 || GCPtrLast32 <= UINT32_C(0xffff)) ))
4776 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4777 Log10(("iemMemApplySegmentToReadJmp: expand down out of bounds %#x..%#x vs %#x..%#x\n",
4778 (uint32_t)GCPtrMem, GCPtrLast32, pSel->u32Limit, pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT16_MAX));
4779 break;
4780
4781 default:
4782 Log10(("iemMemApplySegmentToReadJmp: bad selector %#x\n", pSel->Attr.u));
4783 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
4784 break;
4785 }
4786 }
4787 Log10(("iemMemApplySegmentToReadJmp: out of bounds %#x..%#x\n",(uint32_t)GCPtrMem, GCPtrLast32));
4788 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
4789 }
4790 /*
4791 * 32-bit flat address.
4792 */
4793 else
4794 return GCPtrMem;
4795}
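
/*
 * Worked example (comment only) for the expand-down case above: a data segment
 * with limit 0x0fff and the B bit set accepts offsets above the limit, so a
 * 4-byte read at GCPtrMem=0x1000 succeeds, while one starting at 0x0ffc fails
 * the bounds check and ends up in iemRaiseSelectorBoundsJmp().
 */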
4796
4797
4798/** @todo slim this down */
4799DECL_INLINE_THROW(RTGCPTR) iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem,
4800 RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
4801{
4802 Assert(cbMem >= 1);
4803 Assert(iSegReg < X86_SREG_COUNT);
4804
4805 /*
4806 * 64-bit mode is simpler.
4807 */
4808 if (IEM_IS_64BIT_CODE(pVCpu))
4809 {
4810 if (iSegReg >= X86_SREG_FS)
4811 {
4812 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4813 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
4814 GCPtrMem += pSel->u64Base;
4815 }
4816
4817 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
4818 return GCPtrMem;
4819 }
4820 /*
4821 * 16-bit and 32-bit segmentation.
4822 */
4823 else
4824 {
4825 Assert(GCPtrMem <= UINT32_MAX);
4826 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
4827 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
4828 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
4829 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
4830 if ( fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE) /* data, expand up */
4831 /** @todo explore exactly how the CS stuff works in real mode. See also
4832 * http://www.rcollins.org/Productivity/DescriptorCache.html and
4833 * http://www.rcollins.org/ddj/Aug98/Aug98.html for some insight. */
4834 || (iSegReg == X86_SREG_CS && IEM_IS_REAL_OR_V86_MODE(pVCpu)) ) /* Ignored for CS. */ /** @todo testcase! */
4835 {
4836 /* expand up */
4837 uint32_t const GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
4838 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
4839 && GCPtrLast32 >= (uint32_t)GCPtrMem))
4840 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4841 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4842 }
4843 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
4844 {
4845 /* expand down - the upper boundary is defined by the B bit, not G. */
4846 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
4847 if (RT_LIKELY( (uint32_t)GCPtrMem >= pSel->u32Limit
4848 && (pSel->Attr.n.u1DefBig || GCPtrLast32 <= UINT32_C(0xffff))
4849 && GCPtrLast32 >= (uint32_t)GCPtrMem))
4850 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
4851 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4852 }
4853 else
4854 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
4855 }
4856 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
4857}
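/*
 * Editor's note (illustrative sketch, not from the original source): a caller
 * typically translates the segmented address first and then performs the guest
 * memory access on the returned flat/linear address, e.g.:
 *
 * @code
 *      RTGCPTR const GCPtrFlat = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_DS,
 *                                                             sizeof(uint32_t), GCPtrMem);
 *      // ... access guest memory at GCPtrFlat; on any bounds or canonical
 *      // violation the function has already longjmp'ed with the right fault.
 * @endcode
 */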
4858
4859#endif /* IEM_WITH_SETJMP */
4860
4861/**
4862 * Fakes a long mode stack selector for SS = 0.
4863 *
4864 * @param pDescSs Where to return the fake stack descriptor.
4865 * @param uDpl The DPL we want.
4866 */
4867DECLINLINE(void) iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl) RT_NOEXCEPT
4868{
4869 pDescSs->Long.au64[0] = 0;
4870 pDescSs->Long.au64[1] = 0;
4871 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4872 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
4873 pDescSs->Long.Gen.u2Dpl = uDpl;
4874 pDescSs->Long.Gen.u1Present = 1;
4875 pDescSs->Long.Gen.u1Long = 1;
4876}
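/*
 * Editor's illustration (assumption, not in the original source): the fake
 * descriptor produced above corresponds to a flat, present, accessed
 * read/write data segment with the long (L) bit set, i.e. how a NULL SS is
 * treated in 64-bit mode:
 *
 * @code
 *      IEMSELDESC DescSs;
 *      iemMemFakeStackSelDesc(&DescSs, 0); // DPL 0 stack
 *      // DescSs.Long.Gen.u4Type    == X86_SEL_TYPE_RW_ACC
 *      // DescSs.Long.Gen.u1Long    == 1
 *      // DescSs.Long.Gen.u1Present == 1
 * @endcode
 */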
4877
4878
4879/*
4880 * Unmap helpers.
4881 */
4882
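/*
 * Editor's note (illustrative, not part of the original source): the commit
 * helpers below share one fast path - when the access was serviced directly
 * via the ring-3 data TLB the returned mapping info is 0 and there is nothing
 * to commit or unmap; otherwise the out-of-line Safe worker does the real
 * work.  A hedged usage sketch (bUnmapInfo, pu32Dst and u32Value come from a
 * preceding map call not shown here):
 *
 * @code
 *      *pu32Dst = u32Value;                           // write through the mapping
 *      iemMemCommitAndUnmapRwJmp(pVCpu, bUnmapInfo);  // no-op when bUnmapInfo == 0
 * @endcode
 */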
4883#ifdef IEM_WITH_SETJMP
4884
4885DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4886{
4887# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4888 if (RT_LIKELY(bMapInfo == 0))
4889 return;
4890# endif
4891 iemMemCommitAndUnmapRwSafeJmp(pVCpu, bMapInfo);
4892}
4893
4894
4895DECL_INLINE_THROW(void) iemMemCommitAndUnmapAtJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4896{
4897# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4898 if (RT_LIKELY(bMapInfo == 0))
4899 return;
4900# endif
4901 iemMemCommitAndUnmapAtSafeJmp(pVCpu, bMapInfo);
4902}
4903
4904
4905DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4906{
4907# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4908 if (RT_LIKELY(bMapInfo == 0))
4909 return;
4910# endif
4911 iemMemCommitAndUnmapWoSafeJmp(pVCpu, bMapInfo);
4912}
4913
4914
4915DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
4916{
4917# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4918 if (RT_LIKELY(bMapInfo == 0))
4919 return;
4920# endif
4921 iemMemCommitAndUnmapRoSafeJmp(pVCpu, bMapInfo);
4922}
4923
4924DECLINLINE(void) iemMemRollbackAndUnmapWo(PVMCPUCC pVCpu, uint8_t bMapInfo) RT_NOEXCEPT
4925{
4926# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
4927 if (RT_LIKELY(bMapInfo == 0))
4928 return;
4929# endif
4930 iemMemRollbackAndUnmapWoSafe(pVCpu, bMapInfo);
4931}
4932
4933#endif /* IEM_WITH_SETJMP */
4934
4935
4936/*
4937 * Instantiate R/W inline templates.
4938 */
4939
4940/** @def TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
4941 * Used to check whether an unaligned access is within the page and won't
4942 * trigger an \#AC.
4943 *
4944 * This can also be used to deal with misaligned accesses on platforms that are
4945 * sensitive to such, if desired.
4946 */
4947#if 1
4948# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) \
4949 ( ((a_GCPtrEff) & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(a_TmplMemType) \
4950 && !((a_pVCpu)->iem.s.fExec & IEM_F_X86_AC) )
4951#else
4952# define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
4953#endif
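/*
 * Editor's illustration (not part of the original source): with 4 KiB guest
 * pages and a uint32_t access, the check above accepts any page offset up to
 * GUEST_PAGE_SIZE - 4 as long as alignment checking (IEM_F_X86_AC) is off:
 *
 * @code
 *      // page offset 0xffc: the 4 bytes still fit within the page -> okay
 *      // page offset 0xffd: the access would straddle the page boundary
 *      //                    -> not okay, the templates use the safe path
 * @endcode
 */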
4954
4955#define TMPL_MEM_WITH_ATOMIC_MAPPING
4956
4957#define TMPL_MEM_TYPE uint8_t
4958#define TMPL_MEM_TYPE_ALIGN 0
4959#define TMPL_MEM_TYPE_SIZE 1
4960#define TMPL_MEM_FN_SUFF U8
4961#define TMPL_MEM_FMT_TYPE "%#04x"
4962#define TMPL_MEM_FMT_DESC "byte"
4963#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4964
4965#define TMPL_MEM_WITH_STACK
4966
4967#define TMPL_MEM_TYPE uint16_t
4968#define TMPL_MEM_TYPE_ALIGN 1
4969#define TMPL_MEM_TYPE_SIZE 2
4970#define TMPL_MEM_FN_SUFF U16
4971#define TMPL_MEM_FMT_TYPE "%#06x"
4972#define TMPL_MEM_FMT_DESC "word"
4973#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4974
4975#define TMPL_WITH_PUSH_SREG
4976#define TMPL_MEM_TYPE uint32_t
4977#define TMPL_MEM_TYPE_ALIGN 3
4978#define TMPL_MEM_TYPE_SIZE 4
4979#define TMPL_MEM_FN_SUFF U32
4980#define TMPL_MEM_FMT_TYPE "%#010x"
4981#define TMPL_MEM_FMT_DESC "dword"
4982#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4983#undef TMPL_WITH_PUSH_SREG
4984
4985#define TMPL_MEM_TYPE uint64_t
4986#define TMPL_MEM_TYPE_ALIGN 7
4987#define TMPL_MEM_TYPE_SIZE 8
4988#define TMPL_MEM_FN_SUFF U64
4989#define TMPL_MEM_FMT_TYPE "%#018RX64"
4990#define TMPL_MEM_FMT_DESC "qword"
4991#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
4992
4993#undef TMPL_MEM_WITH_STACK
4994#undef TMPL_MEM_WITH_ATOMIC_MAPPING
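/*
 * Editor's note (assumption, not stated in this file): each include of
 * IEMAllMemRWTmplInline.cpp.h above stamps out inline fetch/store (and, with
 * TMPL_MEM_WITH_STACK, push/pop) helpers whose names carry the
 * TMPL_MEM_FN_SUFF value.  The hypothetical shape for the U32 instantiation
 * would be roughly:
 *
 * @code
 *      // illustrative only - see IEMAllMemRWTmplInline.cpp.h for the
 *      // authoritative signatures
 *      uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem);
 *      void     iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value);
 * @endcode
 */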
4995
4996#define TMPL_MEM_NO_MAPPING /* currently sticky */
4997
4998#define TMPL_MEM_NO_STORE
4999#define TMPL_MEM_TYPE uint32_t
5000#define TMPL_MEM_TYPE_ALIGN 0
5001#define TMPL_MEM_TYPE_SIZE 4
5002#define TMPL_MEM_FN_SUFF U32NoAc
5003#define TMPL_MEM_FMT_TYPE "%#010x"
5004#define TMPL_MEM_FMT_DESC "dword"
5005#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5006
5007#define TMPL_MEM_NO_STORE
5008#define TMPL_MEM_TYPE uint64_t
5009#define TMPL_MEM_TYPE_ALIGN 0
5010#define TMPL_MEM_TYPE_SIZE 8
5011#define TMPL_MEM_FN_SUFF U64NoAc
5012#define TMPL_MEM_FMT_TYPE "%#018RX64"
5013#define TMPL_MEM_FMT_DESC "qword"
5014#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5015
5016#define TMPL_MEM_NO_STORE
5017#define TMPL_MEM_TYPE uint64_t
5018#define TMPL_MEM_TYPE_ALIGN 15
5019#define TMPL_MEM_TYPE_SIZE 8
5020#define TMPL_MEM_FN_SUFF U64AlignedU128
5021#define TMPL_MEM_FMT_TYPE "%#018RX64"
5022#define TMPL_MEM_FMT_DESC "qword"
5023#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5024
5025#undef TMPL_MEM_NO_MAPPING
5026
5027#define TMPL_MEM_TYPE RTFLOAT80U
5028#define TMPL_MEM_TYPE_ALIGN 7
5029#define TMPL_MEM_TYPE_SIZE 10
5030#define TMPL_MEM_FN_SUFF R80
5031#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
5032#define TMPL_MEM_FMT_DESC "tword"
5033#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5034
5035#define TMPL_MEM_TYPE RTPBCD80U
5036#define TMPL_MEM_TYPE_ALIGN 7 /** @todo RTPBCD80U alignment testcase */
5037#define TMPL_MEM_TYPE_SIZE 10
5038#define TMPL_MEM_FN_SUFF D80
5039#define TMPL_MEM_FMT_TYPE "%.10Rhxs"
5040#define TMPL_MEM_FMT_DESC "tword"
5041#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5042
5043#define TMPL_MEM_WITH_ATOMIC_MAPPING
5044#define TMPL_MEM_TYPE RTUINT128U
5045#define TMPL_MEM_TYPE_ALIGN 15
5046#define TMPL_MEM_TYPE_SIZE 16
5047#define TMPL_MEM_FN_SUFF U128
5048#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
5049#define TMPL_MEM_FMT_DESC "dqword"
5050#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5051#undef TMPL_MEM_WITH_ATOMIC_MAPPING
5052
5053#define TMPL_MEM_NO_MAPPING
5054#define TMPL_MEM_TYPE RTUINT128U
5055#define TMPL_MEM_TYPE_ALIGN 0
5056#define TMPL_MEM_TYPE_SIZE 16
5057#define TMPL_MEM_FN_SUFF U128NoAc
5058#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
5059#define TMPL_MEM_FMT_DESC "dqword"
5060#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5061#undef TMPL_MEM_NO_MAPPING
5062
5063
5064/* Every template for which an unaligned access within a page is NOT okay should go below. */
5065#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
5066#define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
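/*
 * Editor's note (assumption based on the macro semantics above): with the
 * check hard-wired to 0, the SSE/AVX aligned templates below never take the
 * unaligned inline fast path; misaligned addresses always go through the safe
 * code, which is where the architectural alignment fault is expected to be
 * raised:
 *
 * @code
 *      // TMPL_MEM_TYPE_ALIGN 15 for U128AlignedSse: (GCPtrEff & 15) != 0
 *      // means a misaligned 128-bit access -> safe path / alignment fault.
 * @endcode
 */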
5067
5068#define TMPL_MEM_NO_MAPPING
5069#define TMPL_MEM_TYPE RTUINT128U
5070#define TMPL_MEM_TYPE_ALIGN 15
5071#define TMPL_MEM_TYPE_SIZE 16
5072#define TMPL_MEM_FN_SUFF U128AlignedSse
5073#define TMPL_MEM_FMT_TYPE "%.16Rhxs"
5074#define TMPL_MEM_FMT_DESC "dqword"
5075#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5076#undef TMPL_MEM_NO_MAPPING
5077
5078#define TMPL_MEM_NO_MAPPING
5079#define TMPL_MEM_TYPE RTUINT256U
5080#define TMPL_MEM_TYPE_ALIGN 0
5081#define TMPL_MEM_TYPE_SIZE 32
5082#define TMPL_MEM_FN_SUFF U256NoAc
5083#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
5084#define TMPL_MEM_FMT_DESC "qqword"
5085#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5086#undef TMPL_MEM_NO_MAPPING
5087
5088#define TMPL_MEM_NO_MAPPING
5089#define TMPL_MEM_TYPE RTUINT256U
5090#define TMPL_MEM_TYPE_ALIGN 31
5091#define TMPL_MEM_TYPE_SIZE 32
5092#define TMPL_MEM_FN_SUFF U256AlignedAvx
5093#define TMPL_MEM_FMT_TYPE "%.32Rhxs"
5094#define TMPL_MEM_FMT_DESC "qqword"
5095#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
5096#undef TMPL_MEM_NO_MAPPING
5097
5098#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
5099
5100/** @} */
5101
5102
5103#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5104
5105/**
5106 * Gets CR0 fixed-0 bits in VMX operation.
5107 *
5108 * We do this rather than fetching what we report to the guest (in the
5109 * IA32_VMX_CR0_FIXED0 MSR) because real hardware reports the same values (and so
5110 * do we) regardless of whether the unrestricted-guest feature is available on the CPU.
5111 *
5112 * @returns CR0 fixed-0 bits.
5113 * @param pVCpu The cross context virtual CPU structure.
5114 * @param fVmxNonRootMode Whether the CR0 fixed-0 bits for VMX non-root mode
5115 * must be returned. When @c false, the CR0 fixed-0
5116 * bits for VMX root mode are returned.
5117 *
5118 */
5119DECLINLINE(uint64_t) iemVmxGetCr0Fixed0(PCVMCPUCC pVCpu, bool fVmxNonRootMode) RT_NOEXCEPT
5120{
5121 Assert(IEM_VMX_IS_ROOT_MODE(pVCpu));
5122
5123 PCVMXMSRS pMsrs = &pVCpu->cpum.GstCtx.hwvirt.vmx.Msrs;
5124 if ( fVmxNonRootMode
5125 && (pMsrs->ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST))
5126 return VMX_V_CR0_FIXED0_UX;
5127 return VMX_V_CR0_FIXED0;
5128}
5129
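/*
 * Editor's illustration (hypothetical usage, not from the original source): a
 * guest CR0 value is only acceptable in VMX operation if all fixed-0 bits are
 * set, so a validity check could look like this:
 *
 * @code
 *      uint64_t const fCr0Fixed0 = iemVmxGetCr0Fixed0(pVCpu, true); // non-root mode
 *      bool const     fCr0Valid  = (uGuestCr0 & fCr0Fixed0) == fCr0Fixed0;
 * @endcode
 */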
5130
5131# ifdef XAPIC_OFF_END /* Requires VBox/apic.h to be included before IEMInline.h. */
5132/**
5133 * Sets virtual-APIC write emulation as pending.
5134 *
5135 * @param pVCpu The cross context virtual CPU structure.
5136 * @param offApic The offset in the virtual-APIC page that was written.
5137 */
5138DECLINLINE(void) iemVmxVirtApicSetPendingWrite(PVMCPUCC pVCpu, uint16_t offApic) RT_NOEXCEPT
5139{
5140 Assert(offApic < XAPIC_OFF_END + 4);
5141
5142 /*
5143 * Record the currently updated APIC offset, as we need this later for figuring
5144 * out whether to perform TPR, EOI or self-IPI virtualization, as well as for
5145 * supplying the exit qualification when causing an APIC-write VM-exit.
5146 */
5147 pVCpu->cpum.GstCtx.hwvirt.vmx.offVirtApicWrite = offApic;
5148
5149 /*
5150 * Flag that we need to perform virtual-APIC write emulation (TPR/PPR/EOI/Self-IPI
5151 * virtualization or APIC-write emulation).
5152 */
5153 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
5154 VMCPU_FF_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE);
5155}
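/*
 * Editor's note (illustrative, not from the original source): a write to the
 * TPR field of the virtual-APIC page, for instance, would be recorded as
 * below; the VMCPU_FF_VMX_APIC_WRITE force flag then makes the run loop do the
 * TPR/EOI/self-IPI virtualization or APIC-write VM-exit afterwards:
 *
 * @code
 *      iemVmxVirtApicSetPendingWrite(pVCpu, XAPIC_OFF_TPR); // XAPIC_OFF_TPR from VBox/apic.h
 * @endcode
 */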
5156# endif /* XAPIC_OFF_END */
5157
5158#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
5159
5160#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3)
5161/**
5162 * Adds an entry to the TLB trace buffer.
5163 *
5164 * @note Don't use directly, only via the IEMTLBTRACE_XXX macros.
5165 */
5166DECLINLINE(void) iemTlbTrace(PVMCPU pVCpu, IEMTLBTRACETYPE enmType, uint64_t u64Param, uint64_t u64Param2 = 0,
5167 uint8_t bParam = 0, uint32_t u32Param = 0/*, uint16_t u16Param = 0 */)
5168{
5169 uint32_t const fMask = RT_BIT_32(pVCpu->iem.s.cTlbTraceEntriesShift) - 1;
5170 PIEMTLBTRACEENTRY const pEntry = &pVCpu->iem.s.paTlbTraceEntries[pVCpu->iem.s.idxTlbTraceEntry++ & fMask];
5171 pEntry->u64Param = u64Param;
5172 pEntry->u64Param2 = u64Param2;
5173 pEntry->u16Param = 0; //u16Param;
5174 pEntry->u32Param = u32Param;
5175 pEntry->bParam = bParam;
5176 pEntry->enmType = enmType;
5177 pEntry->rip = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
5178}
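/*
 * Editor's illustration (not part of the original source): the trace buffer is
 * a power-of-two ring, so the write index simply wraps through the mask:
 *
 * @code
 *      // with cTlbTraceEntriesShift == 16:
 *      //   fMask == RT_BIT_32(16) - 1 == 0xffff  -> 65536 entries, entry
 *      //   65536 silently overwrites entry 0.
 * @endcode
 */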
5179#endif
5180
5181#endif /* !VMM_INCLUDED_SRC_include_IEMInline_h */