VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsThreadedRecompiler.cpp@ 100148

Last change on this file since 100148 was 100148, checked in by vboxsync, 22 months ago

VMM/IEM: Made the python scripts pick up and deal with the IEM_MC_DEFER_TO_CIMPL_[0-5]_RET short-hand macros. bugref:10369

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.0 KB
Line 
1/* $Id: IEMAllInstructionsThreadedRecompiler.cpp 100148 2023-06-10 19:44:02Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
33# define LOG_GROUP LOG_GROUP_IEM
34#endif
35#define VMCPU_INCL_CPUM_GST_CTX
36#include <VBox/vmm/iem.h>
37#include <VBox/vmm/cpum.h>
38#include <VBox/vmm/apic.h>
39#include <VBox/vmm/pdm.h>
40#include <VBox/vmm/pgm.h>
41#include <VBox/vmm/iom.h>
42#include <VBox/vmm/em.h>
43#include <VBox/vmm/hm.h>
44#include <VBox/vmm/nem.h>
45#include <VBox/vmm/gim.h>
46#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
47# include <VBox/vmm/em.h>
48# include <VBox/vmm/hm_svm.h>
49#endif
50#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
51# include <VBox/vmm/hmvmxinline.h>
52#endif
53#include <VBox/vmm/tm.h>
54#include <VBox/vmm/dbgf.h>
55#include <VBox/vmm/dbgftrace.h>
56#ifndef TST_IEM_CHECK_MC
57# include "IEMInternal.h"
58#endif
59#include <VBox/vmm/vmcc.h>
60#include <VBox/log.h>
61#include <VBox/err.h>
62#include <VBox/param.h>
63#include <VBox/dis.h>
64#include <VBox/disopcode-x86-amd64.h>
65#include <iprt/asm-math.h>
66#include <iprt/assert.h>
67#include <iprt/string.h>
68#include <iprt/x86.h>
69
70#ifndef TST_IEM_CHECK_MC
71# include "IEMInline.h"
72# include "IEMOpHlp.h"
73# include "IEMMc.h"
74#endif
75
76#include "IEMThreadedFunctions.h"
77
78
79/*
80 * Narrow down configs here to avoid wasting time on unused configs.
81 */
82
83#ifndef IEM_WITH_CODE_TLB
84# error The code TLB must be enabled for the recompiler.
85#endif
86
87#ifndef IEM_WITH_DATA_TLB
88# error The data TLB must be enabled for the recompiler.
89#endif
90
91#ifndef IEM_WITH_SETJMP
92# error The setjmp approach must be enabled for the recompiler.
93#endif
94
95
96/*********************************************************************************************************************************
97* Structures and Typedefs *
98*********************************************************************************************************************************/
99/**
100 * A call for the threaded call table.
101 */
/**
 * A call for the threaded call table.
 *
 * The IEM_MC2_EMIT_CALL_[0-3] macros below append one of these to the current
 * translation block's call table, recording which threaded function to invoke
 * and up to three generic parameters for it.
 */
typedef struct IEMTHRDEDCALLENTRY
{
    /** The function to call (IEMTHREADEDFUNCS). */
    uint16_t    enmFunction;
    /** Explicit padding; not used yet. */
    uint16_t    uUnused0;

    /** The opcode length. */
    uint8_t     cbOpcode;
    /** The opcode chunk number.
     * @note sketches for discontiguous opcode support */
    uint8_t     idxOpcodeChunk;
    /** The offset into the opcode chunk of this function.
     * @note sketches for discontiguous opcode support */
    uint16_t    offOpcodeChunk;

    /** Generic parameters. */
    uint64_t    auParams[3];
} IEMTHRDEDCALLENTRY;
/* Keep the entry at exactly four 64-bit words (32 bytes) so table indexing stays cheap. */
AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
/** Pointer to a threaded call entry. */
typedef IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
/** Pointer to a const threaded call entry. */
typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
125
126
127
128/**
129 * Translation block.
130 */
/**
 * Translation block.
 *
 * Blocks live in a hash table (chained via pNext) and on a per-VCPU list
 * (LocalList); iemThreadedTbLookup is keyed on the physical and flat PC.
 */
typedef struct IEMTB
{
    /** Next block with the same hash table entry. */
    PIEMTB volatile     pNext;
    /** List on the local VCPU for blocks. */
    RTLISTNODE          LocalList;

    /** @name What uniquely identifies the block.
     * @{ */
    /** Physical address of the first opcode byte. */
    RTGCPHYS            GCPhysPc;
    /** Flat PC of the first opcode byte (CS base + RIP, see
     *  iemGetPcWithPhysAndCode). */
    uint64_t            uPc;
    /** Block flags (exact bit definitions not visible in this file). */
    uint32_t            fFlags;
    union
    {
        struct
        {
            /** The CS base. */
            uint32_t    uCsBase;
            /** The CS limit (UINT32_MAX for 64-bit code). */
            uint32_t    uCsLimit;
            /** The CS selector value. */
            uint16_t    CS;
            /** Relevant X86DESCATTR_XXX bits. */
            uint16_t    fAttr;
        } x86;
    };
    /** @} */

    /** Number of bytes of opcodes covered by this block.
     * @todo Support discontiguous chunks of opcodes in same block, though maybe
     *       restrict to the initial page or smth. */
    uint32_t    cbPC;

    union
    {
        struct
        {
            /** Number of calls in paCalls. */
            uint32_t            cCalls;
            /** Number of calls allocated. */
            uint32_t            cAllocated;
            /** The call sequence table. */
            PIEMTHRDEDCALLENTRY paCalls;
        } Thrd;
    };

} IEMTB;
179
180
181/*********************************************************************************************************************************
182* Defined Constants And Macros *
183*********************************************************************************************************************************/
/* The included instruction decoder sources reference g_apfnOneByteMap;
   alias it to the recompiler's own one-byte opcode map (presumably generated
   alongside IEMThreadedInstructions.cpp.h -- verify). */
#define g_apfnOneByteMap    g_apfnIemThreadedRecompilerOneByteMap


/* Re-define IEM_MC_CALC_RM_EFF_ADDR so it additionally captures the effective
   address info (uEffAddrInfo) the recompiler needs.  Only the setjmp variant
   below can actually be active here, since IEM_WITH_SETJMP is mandatory for
   the recompiler (see the #error checks above). */
#undef IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (bRm), (cbImm), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (bRm), (cbImm), &uEffAddrInfo))
#endif
197
/*
 * IEM_MC2_EMIT_CALL_[0-3]: append a threaded call entry with 0..3 generic
 * parameters to the current translation block's call table.
 *
 * The enmFunctionCheck / uArgNCheck locals exist purely to get compile-time
 * type checking of the macro arguments; RT_NOREF silences unused warnings.
 * Unused auParams slots are zeroed so entries compare/hash deterministically.
 *
 * NOTE(review): Thrd.cCalls is post-incremented with no check against
 * Thrd.cAllocated -- presumably the compiler loop guarantees capacity before
 * emitting; verify once iemThreadedCompile is implemented.
 */
#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->cbOpcode    = IEM_GET_INSTR_LEN(pVCpu); \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)
248
249
/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
/* Note: a_fFlags is deliberately ignored here; only the C-impl worker matters
   for the recompiler wrapper at this stage. */
#undef IEM_MC_DEFER_TO_CIMPL_0_RET
#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_pfnCImpl)

/** Function type for a parameterless C-impl worker. */
typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
/** Pointer to a parameterless C-impl worker. */
typedef FNIEMCIMPL0 *PFNIEMCIMPL0;

/**
 * Worker for IEM_MC_DEFER_TO_CIMPL_0_RET: invokes the C implementation,
 * passing it the current instruction length.
 *
 * @returns Strict VBox status code from the C-impl worker.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfnCImpl    The parameterless C-impl function to defer to.
 */
DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, PFNIEMCIMPL0 pfnCImpl)
{
    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}

/** @todo deal with IEM_MC_DEFER_TO_CIMPL_1, IEM_MC_DEFER_TO_CIMPL_2 and
 *        IEM_MC_DEFER_TO_CIMPL_3 as well. */
269
270/*
271 * Include the "annotated" IEMAllInstructions*.cpp.h files.
272 */
273#include "IEMThreadedInstructions.cpp.h"
274
275
276
277/*
278 * Real code.
279 */
280
281static VBOXSTRICTRC iemThreadedCompile(PVMCC pVM, PVMCPUCC pVCpu)
282{
283 RT_NOREF(pVM, pVCpu);
284 return VERR_NOT_IMPLEMENTED;
285}
286
287
288static VBOXSTRICTRC iemThreadedCompileLongJumped(PVMCC pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
289{
290 RT_NOREF(pVM, pVCpu);
291 return rcStrict;
292}
293
294
295static PIEMTB iemThreadedTbLookup(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysPC, uint64_t uPc)
296{
297 RT_NOREF(pVM, pVCpu, GCPhysPC, uPc);
298 return NULL;
299}
300
301
302static VBOXSTRICTRC iemThreadedTbExec(PVMCC pVM, PVMCPUCC pVCpu, PIEMTB pTb)
303{
304 RT_NOREF(pVM, pVCpu, pTb);
305 return VERR_NOT_IMPLEMENTED;
306}
307
308
309/**
310 * This is called when the PC doesn't match the current pbInstrBuf.
311 */
/**
 * This is called when the PC doesn't match the current pbInstrBuf.
 *
 * Invalidates the current instruction buffer state so nothing stale can be
 * decoded from it.
 *
 * @returns Flat PC -- currently always 0; proper refetching is still a todo
 *          (the uPc and pPhys parameters are not used yet).
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   uPc     The flat PC that missed the buffer (currently unused).
 * @param   pPhys   Where the physical PC should be returned (currently not
 *                  written -- callers must not rely on it on this path yet).
 */
static uint64_t iemGetPcWithPhysAndCodeMissed(PVMCPUCC pVCpu, uint64_t const uPc, PRTGCPHYS pPhys)
{
    /** @todo see iemOpcodeFetchBytesJmp */
    pVCpu->iem.s.pbInstrBuf = NULL;

    /* Zero all offsets and sizes so the buffer reads as empty. */
    pVCpu->iem.s.offInstrNextByte = 0;
    pVCpu->iem.s.offCurInstrStart = 0;
    pVCpu->iem.s.cbInstrBuf       = 0;
    pVCpu->iem.s.cbInstrBufTotal  = 0;

    RT_NOREF(uPc, pPhys);
    return 0;
}
325
326
/** @todo need private inline decl for throw/nothrow matching IEM_WITH_SETJMP? */
/**
 * Gets the current flat PC and its physical address, updating the instruction
 * buffer state for decoding at that position.
 *
 * Fast path: when the PC still falls inside the currently mapped pbInstrBuf,
 * only the buffer offsets are adjusted.  Otherwise the slow path
 * iemGetPcWithPhysAndCodeMissed() takes over.
 *
 * @returns Flat PC (CS base + RIP).
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pPhys   Where to return the physical address of the PC.
 */
DECL_INLINE_THROW(uint64_t) iemGetPcWithPhysAndCode(PVMCPUCC pVCpu, PRTGCPHYS pPhys)
{
    /* In 64-bit code the CS base is required to be zero, so the addition only
       does real work for the other modes. */
    Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu));
    uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base;
    if (pVCpu->iem.s.pbInstrBuf)
    {
        /* Unsigned wrap-around makes this a single range check: uPc below
           uInstrBufPc yields a huge 'off' that fails the comparison. */
        uint64_t off = uPc - pVCpu->iem.s.uInstrBufPc;
        if (off < pVCpu->iem.s.cbInstrBufTotal)
        {
            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
            /* Allow fetching up to 15 bytes (presumably the x86 max
               instruction length) past the start, capped at the buffer end. */
            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
            else
                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;

            *pPhys = pVCpu->iem.s.GCPhysInstrBuf + off;
            return uPc;
        }
    }
    return iemGetPcWithPhysAndCodeMissed(pVCpu, uPc, pPhys);
}
350
351
/**
 * Entry point for the threaded recompiler: runs guest code by looking up,
 * compiling and executing translation blocks until a non-VINF_SUCCESS status
 * is produced.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMDECL(VBOXSTRICTRC) IEMExecRecompilerThreaded(PVMCC pVM, PVMCPUCC pVCpu)
{
    /*
     * Init the execution environment.
     */
    iemInitExec(pVCpu, 0 /*fExecOpts*/);

    /*
     * Run-loop.
     *
     * If we're using setjmp/longjmp we combine all the catching here to avoid
     * having to call setjmp for each block we're executing.
     */
    for (;;)
    {
        PIEMTB pTb = NULL;
        VBOXSTRICTRC rcStrict;
#ifdef IEM_WITH_SETJMP
        IEM_TRY_SETJMP(pVCpu, rcStrict)
#endif
        {
            for (;;)
            {
                /* Translate PC to physical address, we'll need this for both lookup and compilation. */
                RTGCPHYS GCPhysPc;
                uint64_t const uPc = iemGetPcWithPhysAndCode(pVCpu, &GCPhysPc);

                /* Execute an existing block, or compile a new one on a miss. */
                pTb = iemThreadedTbLookup(pVM, pVCpu, GCPhysPc, uPc);
                if (pTb)
                    rcStrict = iemThreadedTbExec(pVM, pVCpu, pTb);
                else
                    rcStrict = iemThreadedCompile(pVM, pVCpu /*, GCPhysPc, uPc*/);
                if (rcStrict == VINF_SUCCESS)
                { /* likely */ }
                else
                    return rcStrict;
            }
        }
#ifdef IEM_WITH_SETJMP
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        {
            pVCpu->iem.s.cLongJumps++;
            /* A longjmp while executing a TB just propagates the status; one
               during compilation gets a chance at special handling. */
            if (pTb)
                return rcStrict;
            return iemThreadedCompileLongJumped(pVM, pVCpu, rcStrict);
        }
        IEM_CATCH_LONGJMP_END(pVCpu);
#endif
    }
}
402
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette