/* $Id: IEMAllThrdTables.h 106401 2024-10-16 20:56:01Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Threaded Recompilation, Instruction Tables.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h
#define VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#endif
#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF /* A bit hackish, but it's all in IEMInline.h. */
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#ifndef TST_IEM_CHECK_MC
# include "IEMInternal.h"
#endif
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#ifndef TST_IEM_CHECK_MC
# include "IEMInline.h"
# include "IEMOpHlp.h"
# include "IEMMc.h"
#endif

#include "IEMThreadedFunctions.h"
#include "IEMN8veRecompiler.h" /* For a_fGstShwFlush and iemThreadedRecompilerMcDeferToCImpl0. */


/*
 * Narrow down configs here to avoid wasting time on unused configs.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define g_apfnOneByteMap    g_apfnIemThreadedRecompilerOneByteMap
#define g_apfnTwoByteMap    g_apfnIemThreadedRecompilerTwoByteMap
#define g_apfnThreeByte0f3a g_apfnIemThreadedRecompilerThreeByte0f3a
#define g_apfnThreeByte0f38 g_apfnIemThreadedRecompilerThreeByte0f38
#define g_apfnVexMap1       g_apfnIemThreadedRecompilerVecMap1
#define g_apfnVexMap2       g_apfnIemThreadedRecompilerVecMap2
#define g_apfnVexMap3       g_apfnIemThreadedRecompilerVecMap3


/*
 * Override IEM_MC_BEGIN to take down the IEM_CIMPL_F_XXX flags.
 */
#undef IEM_MC_BEGIN
#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) \
    { \
        pVCpu->iem.s.fTbCurInstr = (a_fCImplFlags) /*| ((a_fMcFlags) << 20*/

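/*
 * Illustrative note (sketch, not part of the build): with the override above,
 * a decoder statement like
 *      IEM_MC_BEGIN(0, IEM_CIMPL_F_VMEXIT)
 * expands to roughly
 *      { pVCpu->iem.s.fTbCurInstr = IEM_CIMPL_F_VMEXIT
 * i.e. it opens the statement block and records the IEM_CIMPL_F_XXX flags of
 * the instruction currently being recompiled; the flag choice here is just an
 * example.
 */
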
/*
 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo.
 */
#undef IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
#endif

/*
 * Likewise override IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES so we fetch all the opcodes.
 */
#undef IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES
#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        uint64_t uEffAddrInfo; \
        (void)iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), 0, &uEffAddrInfo); \
    } while (0)

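/*
 * Example (for illustration): for an instruction such as
 * 'add eax, [rbx+rcx*4+0x1234]' the overrides above make sure the ModR/M, SIB
 * and displacement bytes are all fetched into the opcode buffer, even on
 * paths that would otherwise merely skip past them; presumably this keeps the
 * TB's recorded opcode bytes complete.
 */
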
/*
 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps.
 */
#undef IEM_MC_REL_JMP_S8_AND_FINISH
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i8) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \
    } while (0)

#undef IEM_MC_REL_JMP_S16_AND_FINISH
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i16) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \
    } while (0)

#undef IEM_MC_REL_JMP_S32_AND_FINISH
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i32) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \
    } while (0)


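/*
 * Example (for illustration): a two byte 'jmp $+2' (encoded EB 00) has a zero
 * displacement and simply continues at the next instruction; the
 * IEMBRANCHED_F_ZERO marking above lets the compiler loop distinguish such
 * degenerate jumps from real branches.
 */
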
#ifndef IEM_WITH_INTRA_TB_JUMPS
/**
 * Stub for a no-jumps config, see IEMAllThrdRecompiler.cpp for the real thing.
 */
DECL_FORCE_INLINE(int) iemThreadedCompileBackAtFirstInstruction(PVMCPU pVCpu, PIEMTB pTb)
{
    RT_NOREF(pTb);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatTbLoopFullTbDetected2);
    return VINF_IEM_RECOMPILE_END_TB;
}
#endif


/*
 * Emit call macros.
 */
#define IEM_MC2_BEGIN_EMIT_CALLS(a_fCheckIrqBefore) \
    { \
        PIEMTB const  pTb        = pVCpu->iem.s.pCurTbR3; \
        uint8_t const cbInstrMc2 = IEM_GET_INSTR_LEN(pVCpu); \
        AssertMsg(pVCpu->iem.s.offOpcode == cbInstrMc2, \
                  ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, cbInstrMc2, \
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
        \
        /* If we need to check for IRQs before the instruction, we do that before \
           adding any opcodes as it may abort the instruction. \
           Note! During compilation, we may swap IRQ and #PF exceptions here \
                 in a manner that a real CPU would not do.  However it shouldn't \
                 be something that is easy (if at all possible) to observe in the \
                 guest, so fine.  The unexpected end-of-tb below has the same \
                 potential "issue". */ \
        if (!(a_fCheckIrqBefore) || iemThreadedCompileEmitIrqCheckBefore(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        /* No page crossing, right? */ \
        uint16_t const offOpcodeMc2 = pTb->cbOpcodes; \
        uint8_t const  idxRangeMc2  = pTb->cRanges - 1; \
        if (   !pVCpu->iem.s.fTbCrossedPage \
            && !pVCpu->iem.s.fTbCheckOpcodes \
            && !pVCpu->iem.s.fTbBranched \
            && !(pTb->fFlags & IEMTB_F_CS_LIM_CHECKS)) \
        { \
            /* Break/loop if we're back to the first instruction in the TB again. */ \
            if (   pTb->aRanges[idxRangeMc2].idxPhysPage != 0 \
                ||    (unsigned)pTb->aRanges[idxRangeMc2].offPhysPage + (unsigned)pTb->aRanges[idxRangeMc2].cbOpcodes \
                   != (pTb->GCPhysPc & GUEST_PAGE_OFFSET_MASK) \
                || offOpcodeMc2 == 0) \
            { \
                /** @todo Custom copy function, given range is 1 thru 15 bytes. */ \
                memcpy(&pTb->pabOpcodes[offOpcodeMc2], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode); \
                pTb->cbOpcodes = offOpcodeMc2 + pVCpu->iem.s.offOpcode; \
                pTb->aRanges[idxRangeMc2].cbOpcodes += cbInstrMc2; \
                Assert(pTb->cbOpcodes <= pVCpu->iem.s.cbOpcodesAllocated); \
            } \
            else \
                return iemThreadedCompileBackAtFirstInstruction(pVCpu, pTb); \
        } \
        else if (iemThreadedCompileBeginEmitCallsComplications(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        uint8_t const idxInstrMc2 = pTb->cInstructions; \
        \
        /* Emit hardware instruction breakpoint check if enabled. */ \
        if (!(pTb->fFlags & IEM_F_PENDING_BRK_INSTR)) \
        { /* likely */ } \
        else \
            IEM_MC2_EMIT_CALL_0(kIemThreadedFunc_BltIn_CheckHwInstrBps)

#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        LogFlow(("Call #%u: " #a_enmFunction "\n", pTb->Thrd.cCalls)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64 a2=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1, (uint64_t)a_uArg2)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)

#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_0(a_fLargeTbLookup, a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        LogFlow(("Call #%u: " #a_enmFunction "\n", pTb->Thrd.cCalls)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_1(a_fLargeTbLookup, a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_2(a_fLargeTbLookup, a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_3(a_fLargeTbLookup, a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64 a2=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1, (uint64_t)a_uArg2)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)

#define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \
        Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \
        if (pTb->cInstructions < 255) \
            pTb->cInstructions++; \
        uint32_t const fCImplFlagsMc2 = (a_fCImplFlags); \
        RT_NOREF(fCImplFlagsMc2); \
    } while (0)


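/*
 * Minimal usage sketch (hypothetical, not part of the build): the emit-call
 * macros above bracket one recompiled instruction and append
 * IEMTHRDEDCALLENTRY records to the TB; a real instance follows in
 * iemThreadedRecompilerMcDeferToCImpl0 below.
 */
#if 0
IEM_MC2_BEGIN_EMIT_CALLS(false /* a_fCheckIrqBefore */);
IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_BltIn_CheckMode, pVCpu->iem.s.fExec);
IEM_MC2_END_EMIT_CALLS(0 /* a_fCImplFlags */);
#endif

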
/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
#undef IEM_MC_DEFER_TO_CIMPL_0_RET
#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_fFlags, a_fGstShwFlush, a_pfnCImpl)

IEM_DECL_MSC_GUARD_IGNORE DECLINLINE(VBOXSTRICTRC)
iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, uint32_t fFlags, uint64_t fGstShwFlush, PFNIEMCIMPL0 pfnCImpl)
{
    LogFlow(("CImpl0: %04x:%08RX64 LB %#x: %#x %#RX64 %p\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IEM_GET_INSTR_LEN(pVCpu), fFlags, fGstShwFlush, pfnCImpl));
    pVCpu->iem.s.fTbCurInstr = fFlags;

    IEM_MC2_BEGIN_EMIT_CALLS(fFlags & IEM_CIMPL_F_CHECK_IRQ_BEFORE);
    IEM_MC2_EMIT_CALL_3(kIemThreadedFunc_BltIn_DeferToCImpl0, (uintptr_t)pfnCImpl, IEM_GET_INSTR_LEN(pVCpu), fGstShwFlush);
    if (   (fFlags & (IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT))
        && !(fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_BRANCH_FAR)))
        IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_BltIn_CheckMode, pVCpu->iem.s.fExec);
    IEM_MC2_END_EMIT_CALLS(fFlags);

    /*
     * We have to repeat work normally done by kdCImplFlags and
     * ThreadedFunctionVariation.emitThreadedCallStmts here.
     */
    AssertCompile(IEM_CIMPL_F_BRANCH_DIRECT      == IEMBRANCHED_F_DIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_INDIRECT    == IEMBRANCHED_F_INDIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_RELATIVE    == IEMBRANCHED_F_RELATIVE);
    AssertCompile(IEM_CIMPL_F_BRANCH_CONDITIONAL == IEMBRANCHED_F_CONDITIONAL);
    AssertCompile(IEM_CIMPL_F_BRANCH_FAR         == IEMBRANCHED_F_FAR);

    if (fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_BRANCH_FAR))
        pVCpu->iem.s.fEndTb = true;
    else if (fFlags & IEM_CIMPL_F_BRANCH_ANY)
        pVCpu->iem.s.fTbBranched = fFlags & (IEM_CIMPL_F_BRANCH_ANY | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL);

    if (fFlags & IEM_CIMPL_F_CHECK_IRQ_BEFORE)
        pVCpu->iem.s.cInstrTillIrqCheck = 0;

    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}
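
/*
 * Usage sketch (hypothetical opcode and worker names, not part of the build):
 * a decoder body that fully defers to a C implementation reduces, via the
 * override above, to a single call:
 */
#if 0
FNIEMOP_DEF(iemOp_ExampleDeferred)
{
    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_VMEXIT, 0 /* a_fGstShwFlush */, iemCImpl_ExampleWorker);
}
#endif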


/**
 * Helper for indicating that we've branched.
 */
DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched)
{
    pVCpu->iem.s.fTbBranched = fTbBranched;
    //pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;
    //pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;
}


#endif /* !VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h */