VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 107044

Last change on this file since 107044 was 106179, checked in by vboxsync, 2 months ago

VMM/IEM: Reworked the div, idiv, mul and imul assembly workers and how we raise division error exceptions. The latter is to simplify eflags management. bugref:10720

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 205.1 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 106179 2024-09-29 01:14:19Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023-2024 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 106179 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
49# Python 3 hacks:
50if sys.version_info[0] >= 3:
51 long = int; # pylint: disable=redefined-builtin,invalid-name
52
53## Number of generic parameters for the thread functions.
54g_kcThreadedParams = 3;
55
56g_kdTypeInfo = {
57 # type name: (cBits, fSigned, C-type )
58 'int8_t': ( 8, True, 'int8_t', ),
59 'int16_t': ( 16, True, 'int16_t', ),
60 'int32_t': ( 32, True, 'int32_t', ),
61 'int64_t': ( 64, True, 'int64_t', ),
62 'uint4_t': ( 4, False, 'uint8_t', ),
63 'uint8_t': ( 8, False, 'uint8_t', ),
64 'uint16_t': ( 16, False, 'uint16_t', ),
65 'uint32_t': ( 32, False, 'uint32_t', ),
66 'uint64_t': ( 64, False, 'uint64_t', ),
67 'uintptr_t': ( 64, False, 'uintptr_t',), # ASSUMES 64-bit host pointer size.
68 'bool': ( 1, False, 'bool', ),
69 'IEMMODE': ( 2, False, 'IEMMODE', ),
70};
71
72# Only for getTypeBitCount/variables.
73g_kdTypeInfo2 = {
74 'RTFLOAT32U': ( 32, False, 'RTFLOAT32U', ),
75 'RTFLOAT64U': ( 64, False, 'RTFLOAT64U', ),
76 'RTUINT64U': ( 64, False, 'RTUINT64U', ),
77 'RTGCPTR': ( 64, False, 'RTGCPTR', ),
78 'RTPBCD80U': ( 80, False, 'RTPBCD80U', ),
79 'RTFLOAT80U': ( 80, False, 'RTFLOAT80U', ),
80 'IEMFPURESULT': (80+16, False, 'IEMFPURESULT', ),
81 'IEMFPURESULTTWO': (80+16+80,False, 'IEMFPURESULTTWO', ),
82 'RTUINT128U': ( 128, False, 'RTUINT128U', ),
83 'X86XMMREG': ( 128, False, 'X86XMMREG', ),
84 'X86YMMREG': ( 256, False, 'X86YMMREG', ),
85 'IEMMEDIAF2XMMSRC': ( 256, False, 'IEMMEDIAF2XMMSRC',),
86 'IEMMEDIAF2YMMSRC': ( 512, False, 'IEMMEDIAF2YMMSRC',),
87 'RTUINT256U': ( 256, False, 'RTUINT256U', ),
88 'IEMPCMPISTRXSRC': ( 256, False, 'IEMPCMPISTRXSRC', ),
89 'IEMPCMPESTRXSRC': ( 384, False, 'IEMPCMPESTRXSRC', ),
90}; #| g_kdTypeInfo; - requires 3.9
91g_kdTypeInfo2.update(g_kdTypeInfo);
92
def getTypeBitCount(sType):
    """
    Translates a type name to its size in bits.

    Unknown types are reported on stdout and assumed to be pointer sized
    (64 bits) rather than raising, so processing can continue.
    """
    aoInfo = g_kdTypeInfo2.get(sType);
    if aoInfo is not None:
        return aoInfo[0];
    # C pointers and P-prefixed pointer typedefs are host-pointer sized (64-bit assumed).
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
104
105g_kdIemFieldToType = {
106 # Illegal ones:
107 'offInstrNextByte': ( None, ),
108 'cbInstrBuf': ( None, ),
109 'pbInstrBuf': ( None, ),
110 'uInstrBufPc': ( None, ),
111 'cbInstrBufTotal': ( None, ),
112 'offCurInstrStart': ( None, ),
113 'cbOpcode': ( None, ),
114 'offOpcode': ( None, ),
115 'offModRm': ( None, ),
116 # Okay ones.
117 'fPrefixes': ( 'uint32_t', ),
118 'uRexReg': ( 'uint8_t', ),
119 'uRexB': ( 'uint8_t', ),
120 'uRexIndex': ( 'uint8_t', ),
121 'iEffSeg': ( 'uint8_t', ),
122 'enmEffOpSize': ( 'IEMMODE', ),
123 'enmDefAddrMode': ( 'IEMMODE', ),
124 'enmEffAddrMode': ( 'IEMMODE', ),
125 'enmDefOpSize': ( 'IEMMODE', ),
126 'idxPrefix': ( 'uint8_t', ),
127 'uVex3rdReg': ( 'uint8_t', ),
128 'uVexLength': ( 'uint8_t', ),
129 'fEvexStuff': ( 'uint8_t', ),
130 'uFpuOpcode': ( 'uint16_t', ),
131};
132
133## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
134## @{
135g_ksFinishAnnotation_Advance = 'Advance';
136g_ksFinishAnnotation_RelJmp = 'RelJmp';
137g_ksFinishAnnotation_SetJmp = 'SetJmp';
138g_ksFinishAnnotation_RelCall = 'RelCall';
139g_ksFinishAnnotation_IndCall = 'IndCall';
140g_ksFinishAnnotation_DeferToCImpl = 'DeferToCImpl';
141## @}
142
143
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Records where a value is referenced in the original decoder code and how
    it gets packed into one of the generic threaded-function parameters.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef     = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;
171
172
173class ThreadedFunctionVariation(object):
174 """ Threaded function variation. """
175
176 ## @name Variations.
177 ## These variations will match translation block selection/distinctions as well.
178 ## @{
179 # pylint: disable=line-too-long
180 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
181 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
182 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
183 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
184 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
185 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
186 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
187 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
188 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
189 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
190 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
191 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
192 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
193 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
194 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
195 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
196 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
197 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
198 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
199 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
200 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
201 ksVariation_32_Flat_Jmp = '_32_Flat_Jmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, conditional jump taken.
202 ksVariation_32f_Flat_Jmp = '_32f_Flat_Jmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, check+clear eflags, conditional jump taken.
203 ksVariation_32_Flat_NoJmp = '_32_Flat_NoJmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, conditional jump not taken.
204 ksVariation_32f_Flat_NoJmp = '_32f_Flat_NoJmp'; ##< 32-bit mode code (386+) with flat CS, SS, DS and ES, check+clear eflags, conditional jump not taken.
205 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
206 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
207 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
208 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64               = '_64';               ##< 64-bit mode code.
    ksVariation_64f              = '_64f';              ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_Jmp           = '_64_Jmp';           ##< 64-bit mode code, conditional jump taken.
    ksVariation_64f_Jmp          = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    ksVariation_64_NoJmp         = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    ksVariation_64f_NoJmp        = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
    ksVariation_64_SamePg_Jmp    = '_64_SamePg_Jmp';    ##< 64-bit mode code, conditional jump within page taken.
    ksVariation_64f_SamePg_Jmp   = '_64f_SamePg_Jmp';   ##< 64-bit mode code, check+clear eflags, conditional jump within page taken.
    ksVariation_64_SamePg_NoJmp  = '_64_SamePg_NoJmp';  ##< 64-bit mode code, conditional jump within page not taken.
    ksVariation_64f_SamePg_NoJmp = '_64f_SamePg_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump within page not taken.
    ksVariation_64_FsGs          = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs         = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32        = '_64_Addr32';        ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32       = '_64f_Addr32';       ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
223 # pylint: enable=line-too-long
224 kasVariations = (
225 ksVariation_Default,
226 ksVariation_16,
227 ksVariation_16f,
228 ksVariation_16_Jmp,
229 ksVariation_16f_Jmp,
230 ksVariation_16_NoJmp,
231 ksVariation_16f_NoJmp,
232 ksVariation_16_Addr32,
233 ksVariation_16f_Addr32,
234 ksVariation_16_Pre386,
235 ksVariation_16f_Pre386,
236 ksVariation_16_Pre386_Jmp,
237 ksVariation_16f_Pre386_Jmp,
238 ksVariation_16_Pre386_NoJmp,
239 ksVariation_16f_Pre386_NoJmp,
240 ksVariation_32,
241 ksVariation_32f,
242 ksVariation_32_Jmp,
243 ksVariation_32f_Jmp,
244 ksVariation_32_NoJmp,
245 ksVariation_32f_NoJmp,
246 ksVariation_32_Flat_Jmp,
247 ksVariation_32f_Flat_Jmp,
248 ksVariation_32_Flat_NoJmp,
249 ksVariation_32f_Flat_NoJmp,
250 ksVariation_32_Flat,
251 ksVariation_32f_Flat,
252 ksVariation_32_Addr16,
253 ksVariation_32f_Addr16,
254 ksVariation_64,
255 ksVariation_64f,
256 ksVariation_64_Jmp,
257 ksVariation_64f_Jmp,
258 ksVariation_64_NoJmp,
259 ksVariation_64f_NoJmp,
260 ksVariation_64_SamePg_Jmp,
261 ksVariation_64f_SamePg_Jmp,
262 ksVariation_64_SamePg_NoJmp,
263 ksVariation_64f_SamePg_NoJmp,
264 ksVariation_64_FsGs,
265 ksVariation_64f_FsGs,
266 ksVariation_64_Addr32,
267 ksVariation_64f_Addr32,
268 );
269 kasVariationsWithoutAddress = (
270 ksVariation_16,
271 ksVariation_16f,
272 ksVariation_16_Pre386,
273 ksVariation_16f_Pre386,
274 ksVariation_32,
275 ksVariation_32f,
276 ksVariation_64,
277 ksVariation_64f,
278 );
279 kasVariationsWithoutAddressNot286 = (
280 ksVariation_16,
281 ksVariation_16f,
282 ksVariation_32,
283 ksVariation_32f,
284 ksVariation_64,
285 ksVariation_64f,
286 );
287 kasVariationsWithoutAddressNot286Not64 = (
288 ksVariation_16,
289 ksVariation_16f,
290 ksVariation_32,
291 ksVariation_32f,
292 );
293 kasVariationsWithoutAddressNot64 = (
294 ksVariation_16,
295 ksVariation_16f,
296 ksVariation_16_Pre386,
297 ksVariation_16f_Pre386,
298 ksVariation_32,
299 ksVariation_32f,
300 );
301 kasVariationsWithoutAddressOnly64 = (
302 ksVariation_64,
303 ksVariation_64f,
304 );
305 kasVariationsWithAddress = (
306 ksVariation_16,
307 ksVariation_16f,
308 ksVariation_16_Addr32,
309 ksVariation_16f_Addr32,
310 ksVariation_16_Pre386,
311 ksVariation_16f_Pre386,
312 ksVariation_32,
313 ksVariation_32f,
314 ksVariation_32_Flat,
315 ksVariation_32f_Flat,
316 ksVariation_32_Addr16,
317 ksVariation_32f_Addr16,
318 ksVariation_64,
319 ksVariation_64f,
320 ksVariation_64_FsGs,
321 ksVariation_64f_FsGs,
322 ksVariation_64_Addr32,
323 ksVariation_64f_Addr32,
324 );
325 kasVariationsWithAddressNot286 = (
326 ksVariation_16,
327 ksVariation_16f,
328 ksVariation_16_Addr32,
329 ksVariation_16f_Addr32,
330 ksVariation_32,
331 ksVariation_32f,
332 ksVariation_32_Flat,
333 ksVariation_32f_Flat,
334 ksVariation_32_Addr16,
335 ksVariation_32f_Addr16,
336 ksVariation_64,
337 ksVariation_64f,
338 ksVariation_64_FsGs,
339 ksVariation_64f_FsGs,
340 ksVariation_64_Addr32,
341 ksVariation_64f_Addr32,
342 );
343 kasVariationsWithAddressNot286Not64 = (
344 ksVariation_16,
345 ksVariation_16f,
346 ksVariation_16_Addr32,
347 ksVariation_16f_Addr32,
348 ksVariation_32,
349 ksVariation_32f,
350 ksVariation_32_Flat,
351 ksVariation_32f_Flat,
352 ksVariation_32_Addr16,
353 ksVariation_32f_Addr16,
354 );
355 kasVariationsWithAddressNot64 = (
356 ksVariation_16,
357 ksVariation_16f,
358 ksVariation_16_Addr32,
359 ksVariation_16f_Addr32,
360 ksVariation_16_Pre386,
361 ksVariation_16f_Pre386,
362 ksVariation_32,
363 ksVariation_32f,
364 ksVariation_32_Flat,
365 ksVariation_32f_Flat,
366 ksVariation_32_Addr16,
367 ksVariation_32f_Addr16,
368 );
369 kasVariationsWithAddressOnly64 = (
370 ksVariation_64,
371 ksVariation_64f,
372 ksVariation_64_FsGs,
373 ksVariation_64f_FsGs,
374 ksVariation_64_Addr32,
375 ksVariation_64f_Addr32,
376 );
377 kasVariationsOnlyPre386 = (
378 ksVariation_16_Pre386,
379 ksVariation_16f_Pre386,
380 );
381 kasVariationsEmitOrder = (
382 ksVariation_Default,
383 ksVariation_64,
384 ksVariation_64f,
385 ksVariation_64_Jmp,
386 ksVariation_64f_Jmp,
387 ksVariation_64_SamePg_Jmp,
388 ksVariation_64f_SamePg_Jmp,
389 ksVariation_64_NoJmp,
390 ksVariation_64f_NoJmp,
391 ksVariation_64_SamePg_NoJmp,
392 ksVariation_64f_SamePg_NoJmp,
393 ksVariation_64_FsGs,
394 ksVariation_64f_FsGs,
395 ksVariation_32_Flat,
396 ksVariation_32f_Flat,
397 ksVariation_32_Flat_Jmp,
398 ksVariation_32f_Flat_Jmp,
399 ksVariation_32_Flat_NoJmp,
400 ksVariation_32f_Flat_NoJmp,
401 ksVariation_32,
402 ksVariation_32f,
403 ksVariation_32_Jmp,
404 ksVariation_32f_Jmp,
405 ksVariation_32_NoJmp,
406 ksVariation_32f_NoJmp,
407 ksVariation_32_Addr16,
408 ksVariation_32f_Addr16,
409 ksVariation_16,
410 ksVariation_16f,
411 ksVariation_16_Jmp,
412 ksVariation_16f_Jmp,
413 ksVariation_16_NoJmp,
414 ksVariation_16f_NoJmp,
415 ksVariation_16_Addr32,
416 ksVariation_16f_Addr32,
417 ksVariation_16_Pre386,
418 ksVariation_16f_Pre386,
419 ksVariation_16_Pre386_Jmp,
420 ksVariation_16f_Pre386_Jmp,
421 ksVariation_16_Pre386_NoJmp,
422 ksVariation_16f_Pre386_NoJmp,
423 ksVariation_64_Addr32,
424 ksVariation_64f_Addr32,
425 );
426 kdVariationNames = {
427 ksVariation_Default: 'defer-to-cimpl',
428 ksVariation_16: '16-bit',
429 ksVariation_16f: '16-bit w/ eflag checking and clearing',
430 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
431 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
432 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
433 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
434 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
435 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
436 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
437 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
438 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
439 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
440 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
441 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
442 ksVariation_32: '32-bit',
443 ksVariation_32f: '32-bit w/ eflag checking and clearing',
444 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
445 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
446 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
447 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
448 ksVariation_32_Flat_Jmp: '32-bit flat+wide CS, ++ w/ conditional jump taken',
449 ksVariation_32f_Flat_Jmp: '32-bit flat+wide CS, ++ w/ eflag checking and clearing and conditional jump taken',
450 ksVariation_32_Flat_NoJmp: '32-bit flat+wide CS, ++ w/ conditional jump not taken',
451 ksVariation_32f_Flat_NoJmp: '32-bit flat+wide CS, ++ w/ eflag checking and clearing and conditional jump not taken',
452 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
453 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
454 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
455 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
456 ksVariation_64: '64-bit',
457 ksVariation_64f: '64-bit w/ eflag checking and clearing',
458 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
459 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
460 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
461 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
462 ksVariation_64_SamePg_Jmp: '64-bit w/ conditional jump within page taken',
463 ksVariation_64f_SamePg_Jmp: '64-bit w/ eflag checking and clearing and conditional jumpwithin page taken',
464 ksVariation_64_SamePg_NoJmp: '64-bit w/ conditional jump within page not taken',
465 ksVariation_64f_SamePg_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump within page not taken',
466 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
467 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
468 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
469 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
470 };
471 kdVariationsWithEflagsCheckingAndClearing = {
472 ksVariation_16f: True,
473 ksVariation_16f_Jmp: True,
474 ksVariation_16f_NoJmp: True,
475 ksVariation_16f_Addr32: True,
476 ksVariation_16f_Pre386: True,
477 ksVariation_16f_Pre386_Jmp: True,
478 ksVariation_16f_Pre386_NoJmp: True,
479 ksVariation_32f: True,
480 ksVariation_32f_Jmp: True,
481 ksVariation_32f_NoJmp: True,
482 ksVariation_32f_Flat: True,
483 ksVariation_32f_Flat_Jmp: True,
484 ksVariation_32f_Flat_NoJmp: True,
485 ksVariation_32f_Addr16: True,
486 ksVariation_64f: True,
487 ksVariation_64f_Jmp: True,
488 ksVariation_64f_NoJmp: True,
489 ksVariation_64f_SamePg_Jmp: True,
490 ksVariation_64f_SamePg_NoJmp: True,
491 ksVariation_64f_FsGs: True,
492 ksVariation_64f_Addr32: True,
493 };
494 kdVariationsOnly64NoFlags = {
495 ksVariation_64: True,
496 ksVariation_64_Jmp: True,
497 ksVariation_64_NoJmp: True,
498 ksVariation_64_SamePg_Jmp: True,
499 ksVariation_64_SamePg_NoJmp: True,
500 ksVariation_64_FsGs: True,
501 ksVariation_64_Addr32: True,
502 };
503 kdVariationsOnly64WithFlags = {
504 ksVariation_64f: True,
505 ksVariation_64f_Jmp: True,
506 ksVariation_64f_NoJmp: True,
507 ksVariation_64f_SamePg_Jmp: True,
508 ksVariation_64f_SamePg_NoJmp: True,
509 ksVariation_64f_FsGs: True,
510 ksVariation_64f_Addr32: True,
511 };
512 kdVariationsOnlyPre386NoFlags = {
513 ksVariation_16_Pre386: True,
514 ksVariation_16_Pre386_Jmp: True,
515 ksVariation_16_Pre386_NoJmp: True,
516 };
517 kdVariationsOnlyPre386WithFlags = {
518 ksVariation_16f_Pre386: True,
519 ksVariation_16f_Pre386_Jmp: True,
520 ksVariation_16f_Pre386_NoJmp: True,
521 };
522 kdVariationsWithFlatAddress = {
523 ksVariation_32_Flat: True,
524 ksVariation_32f_Flat: True,
525 ksVariation_64: True,
526 ksVariation_64f: True,
527 ksVariation_64_Addr32: True,
528 ksVariation_64f_Addr32: True,
529 };
530 kdVariationsWithFlatStackAddress = {
531 ksVariation_32_Flat: True,
532 ksVariation_32f_Flat: True,
533 ksVariation_64: True,
534 ksVariation_64f: True,
535 ksVariation_64_FsGs: True,
536 ksVariation_64f_FsGs: True,
537 ksVariation_64_Addr32: True,
538 ksVariation_64f_Addr32: True,
539 };
540 kdVariationsWithFlat64StackAddress = {
541 ksVariation_64: True,
542 ksVariation_64f: True,
543 ksVariation_64_FsGs: True,
544 ksVariation_64f_FsGs: True,
545 ksVariation_64_Addr32: True,
546 ksVariation_64f_Addr32: True,
547 };
548 kdVariationsWithFlatAddr16 = {
549 ksVariation_16: True,
550 ksVariation_16f: True,
551 ksVariation_16_Pre386: True,
552 ksVariation_16f_Pre386: True,
553 ksVariation_32_Addr16: True,
554 ksVariation_32f_Addr16: True,
555 };
556 kdVariationsWithFlatAddr32No64 = {
557 ksVariation_16_Addr32: True,
558 ksVariation_16f_Addr32: True,
559 ksVariation_32: True,
560 ksVariation_32f: True,
561 ksVariation_32_Flat: True,
562 ksVariation_32f_Flat: True,
563 };
564 kdVariationsWithAddressOnly64 = {
565 ksVariation_64: True,
566 ksVariation_64f: True,
567 ksVariation_64_FsGs: True,
568 ksVariation_64f_FsGs: True,
569 ksVariation_64_Addr32: True,
570 ksVariation_64f_Addr32: True,
571 };
572 kdVariationsWithConditional = {
573 ksVariation_16_Jmp: True,
574 ksVariation_16_NoJmp: True,
575 ksVariation_16_Pre386_Jmp: True,
576 ksVariation_16_Pre386_NoJmp: True,
577 ksVariation_32_Jmp: True,
578 ksVariation_32_NoJmp: True,
579 ksVariation_32_Flat_Jmp: True,
580 ksVariation_32_Flat_NoJmp: True,
581 ksVariation_64_Jmp: True,
582 ksVariation_64_NoJmp: True,
583 ksVariation_64_SamePg_Jmp: True,
584 ksVariation_64_SamePg_NoJmp: True,
585 ksVariation_16f_Jmp: True,
586 ksVariation_16f_NoJmp: True,
587 ksVariation_16f_Pre386_Jmp: True,
588 ksVariation_16f_Pre386_NoJmp: True,
589 ksVariation_32f_Jmp: True,
590 ksVariation_32f_NoJmp: True,
591 ksVariation_32f_Flat_Jmp: True,
592 ksVariation_32f_Flat_NoJmp: True,
593 ksVariation_64f_Jmp: True,
594 ksVariation_64f_NoJmp: True,
595 ksVariation_64f_SamePg_Jmp: True,
596 ksVariation_64f_SamePg_NoJmp: True,
597 };
598 kdVariationsWithConditionalNoJmp = {
599 ksVariation_16_NoJmp: True,
600 ksVariation_16_Pre386_NoJmp: True,
601 ksVariation_32_NoJmp: True,
602 ksVariation_32_Flat_NoJmp: True,
603 ksVariation_64_NoJmp: True,
604 ksVariation_64_SamePg_NoJmp: True,
605 ksVariation_16f_NoJmp: True,
606 ksVariation_16f_Pre386_NoJmp: True,
607 ksVariation_32f_NoJmp: True,
608 ksVariation_32f_Flat_NoJmp: True,
609 ksVariation_64f_NoJmp: True,
610 ksVariation_64f_SamePg_NoJmp: True,
611 };
612 kdVariationsWithFlat32Conditional = {
613 ksVariation_32_Flat_Jmp: True,
614 ksVariation_32_Flat_NoJmp: True,
615 ksVariation_32f_Flat_Jmp: True,
616 ksVariation_32f_Flat_NoJmp: True,
617 };
618 kdVariationsWithSamePgConditional = {
619 ksVariation_64_SamePg_Jmp: True,
620 ksVariation_64_SamePg_NoJmp: True,
621 ksVariation_64f_SamePg_Jmp: True,
622 ksVariation_64f_SamePg_NoJmp: True,
623 };
624 kdVariationsOnlyPre386 = {
625 ksVariation_16_Pre386: True,
626 ksVariation_16f_Pre386: True,
627 ksVariation_16_Pre386_Jmp: True,
628 ksVariation_16f_Pre386_Jmp: True,
629 ksVariation_16_Pre386_NoJmp: True,
630 ksVariation_16f_Pre386_NoJmp: True,
631 };
632 ## @}
633
634 ## IEM_CIMPL_F_XXX flags that we know.
635 ## The value indicates whether it terminates the TB or not. The goal is to
636 ## improve the recompiler so all but END_TB will be False.
637 ##
638 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
639 kdCImplFlags = {
640 'IEM_CIMPL_F_MODE': False,
641 'IEM_CIMPL_F_BRANCH_DIRECT': False,
642 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
643 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
644 'IEM_CIMPL_F_BRANCH_FAR': True,
645 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
646 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
647 'IEM_CIMPL_F_BRANCH_STACK': False,
648 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
649 'IEM_CIMPL_F_RFLAGS': False,
650 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
651 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
652 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
653 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
654 'IEM_CIMPL_F_STATUS_FLAGS': False,
655 'IEM_CIMPL_F_VMEXIT': False,
656 'IEM_CIMPL_F_FPU': False,
657 'IEM_CIMPL_F_REP': False,
658 'IEM_CIMPL_F_IO': False,
659 'IEM_CIMPL_F_END_TB': True,
660 'IEM_CIMPL_F_XCPT': True,
661 'IEM_CIMPL_F_CALLS_CIMPL': False,
662 'IEM_CIMPL_F_CALLS_AIMPL': False,
663 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
664 'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE': False,
665 };
666
    def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
        """ Instantiates a variation of the given threaded function. """
        self.oParent = oThreadedFunction # type: ThreadedFunction
        ##< ksVariation_Xxxx.
        self.sVariation = sVariation

        ## Threaded function parameter references.
        self.aoParamRefs = [] # type: List[ThreadedParamRef]
        ## Unique parameter references.
        self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
        ## Minimum number of parameters to the threaded function.
        self.cMinParams = 0;

        ## List/tree of statements for the threaded function.
        self.aoStmtsForThreadedFunction = [] # type: List[McStmt]

        ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
        self.iEnumValue = -1;

        ## Native recompilation details for this variation.
        self.oNativeRecomp = None;
687
688 def getIndexName(self):
689 sName = self.oParent.oMcBlock.sFunction;
690 if sName.startswith('iemOp_'):
691 sName = sName[len('iemOp_'):];
692 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
693
694 def getThreadedFunctionName(self):
695 sName = self.oParent.oMcBlock.sFunction;
696 if sName.startswith('iemOp_'):
697 sName = sName[len('iemOp_'):];
698 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
699
700 def getNativeFunctionName(self):
701 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
702
703 def getLivenessFunctionName(self):
704 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
705
706 def getShortName(self):
707 sName = self.oParent.oMcBlock.sFunction;
708 if sName.startswith('iemOp_'):
709 sName = sName[len('iemOp_'):];
710 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
711
712 def getThreadedFunctionStatisticsName(self):
713 sName = self.oParent.oMcBlock.sFunction;
714 if sName.startswith('iemOp_'):
715 sName = sName[len('iemOp_'):];
716
717 sVarNm = self.sVariation;
718 if sVarNm:
719 if sVarNm.startswith('_'):
720 sVarNm = sVarNm[1:];
721 if sVarNm.endswith('_Jmp'):
722 sVarNm = sVarNm[:-4];
723 sName += '_Jmp';
724 elif sVarNm.endswith('_NoJmp'):
725 sVarNm = sVarNm[:-6];
726 sName += '_NoJmp';
727 else:
728 sVarNm = 'DeferToCImpl';
729
730 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
731
732 def isWithFlagsCheckingAndClearingVariation(self):
733 """
734 Checks if this is a variation that checks and clears EFLAGS.
735 """
736 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
737
738 #
739 # Analysis and code morphing.
740 #
741
742 def raiseProblem(self, sMessage):
743 """ Raises a problem. """
744 self.oParent.raiseProblem(sMessage);
745
746 def warning(self, sMessage):
747 """ Emits a warning. """
748 self.oParent.warning(sMessage);
749
    def analyzeReferenceToType(self, sRef):
        """
        Translates a variable or structure reference to a type.
        Returns type name.
        Raises exception if unable to figure it out.

        The type is derived from the hungarian prefix of the reference name;
        the order of the prefix checks below is significant (e.g. 'u16' must
        not be matched by a bare 'u' test).
        """
        ch0 = sRef[0];
        if ch0 == 'u':
            if sRef.startswith('u32'):
                return 'uint32_t';
            # 'uReg' is a special case that does not follow the uNN convention.
            if sRef.startswith('u8') or sRef == 'uReg':
                return 'uint8_t';
            if sRef.startswith('u64'):
                return 'uint64_t';
            if sRef.startswith('u16'):
                return 'uint16_t';
        elif ch0 == 'b':
            return 'uint8_t';
        elif ch0 == 'f':
            return 'bool';
        elif ch0 == 'i':
            if sRef.startswith('i8'):
                return 'int8_t';
            if sRef.startswith('i16'):
                return 'int16_t';
            if sRef.startswith('i32'):
                return 'int32_t';
            if sRef.startswith('i64'):
                return 'int64_t';
            # Register index variables are unsigned bytes despite the 'i' prefix.
            if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
                return 'uint8_t';
        elif ch0 == 'p':
            # A plain pointer name (no '->' member access) is host-pointer sized.
            if sRef.find('-') < 0:
                return 'uintptr_t';
            # Member accesses on pVCpu->iem.s. are mapped via g_kdIemFieldToType;
            # fields mapped to None (illegal fields) fall through to raiseProblem.
            if sRef.startswith('pVCpu->iem.s.'):
                sField = sRef[len('pVCpu->iem.s.') : ];
                if sField in g_kdIemFieldToType:
                    if g_kdIemFieldToType[sField][0]:
                        return g_kdIemFieldToType[sField][0];
        elif ch0 == 'G' and sRef.startswith('GCPtr'):
            return 'uint64_t';
        elif ch0 == 'e':
            if sRef == 'enmEffOpSize':
                return 'IEMMODE';
        elif ch0 == 'o':
            if sRef.startswith('off32'):
                return 'uint32_t';
        elif sRef == 'cbFrame': # enter
            return 'uint16_t';
        elif sRef == 'cShift': ## @todo risky
            return 'uint8_t';

        self.raiseProblem('Unknown reference: %s' % (sRef,));
        return None; # Shut up pylint 2.16.2.
804
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        Returns the function pointer type name.
        Raises exception (via raiseProblem) if it cannot be determined.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first.
            # Presumably asDefArgs holds (type, name) pairs starting at index 2, so a
            # match at iArg means iArg - 1 holds its type -- TODO confirm against
            # IEMAllInstPython.py.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # Convert 'const IEMOPXXX *' declarations to the 'PCIEMOPXXX' typedef form.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');
            # The trailing 'U<bits>' of the member name selects the worker function type width.
            offBits   = sMember.rfind('U') + 1;
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPBINTODOSIZES':      return 'PFNIEMAIMPLBINTODOU'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF2':           return 'PFNIEMAIMPLMEDIAF2U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF2IMM8':       return 'PFNIEMAIMPLMEDIAF2U'    + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
859
860 def analyze8BitGRegStmt(self, oStmt):
861 """
862 Gets the 8-bit general purpose register access details of the given statement.
863 ASSUMES the statement is one accessing an 8-bit GREG.
864 """
865 idxReg = 0;
866 if ( oStmt.sName.find('_FETCH_') > 0
867 or oStmt.sName.find('_REF_') > 0
868 or oStmt.sName.find('_TO_LOCAL') > 0):
869 idxReg = 1;
870
871 sRegRef = oStmt.asParams[idxReg];
872 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
873 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
874 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
875 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
876 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
877 else:
878 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
879 % (sRegRef, sRegRef, sRegRef,);
880
881 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
882 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
883 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
884 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
885 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
886 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
887 else:
888 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
889 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
890 sStdRef = 'bOther8Ex';
891
892 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
893 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
894 return (idxReg, sOrgExpr, sStdRef);
895
896
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each value is a pair (idxEffSeg, sFlatMcName): idxEffSeg is the
    ## zero-based index of the effective-segment parameter that the FLAT form
    ## drops (the morphing code treats -1 as "no such parameter"), and
    ## sFlatMcName is the FLAT MC name substituted for the key.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_I16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16_DISP' ),
        'IEM_MC_FETCH_MEM_U32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_I32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32_DISP' ),
        'IEM_MC_FETCH_MEM_U64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':    ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':          ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':          ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
                                                          ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
                                                          ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX_AND_YREG_YMM':    ( 2, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM' ),
        'IEM_MC_STORE_MEM_U8':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC':        ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC':        ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC':           ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC':         ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                  ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
1001
    ## Maps stack push/pop MCs to their flat variants.
    ## Each value is a pair (MC for flat 32-bit stacks, MC for flat 64-bit
    ## stacks); the second element is selected when the variation is in
    ## kdVariationsWithFlat64StackAddress.  Entries that just repeat the key
    ## presumably have no dedicated flat variant for that mode, so the
    ## original MC is used as-is.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                    ( 'IEM_MC_FLAT32_PUSH_U16',      'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                    ( 'IEM_MC_FLAT32_PUSH_U32',      'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                    ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':               ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':                ( 'IEM_MC_FLAT32_POP_GREG_U16',  'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':                ( 'IEM_MC_FLAT32_POP_GREG_U32',  'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':                ( 'IEM_MC_POP_GREG_U64',         'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
1011
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_* MC that
    ## replaces IEM_MC_CALC_RM_EFF_ADDR when morphing the statements for that
    ## variation (see analyzeMorphStmtForThreaded).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
1032
    ## Set (dict used as a set) of the relative-jump MCs that come in
    ## flat / intra-page ("same page") threaded variations, i.e. may get the
    ## _INTRAPG / _PC32_FLAT name suffixes during statement morphing.
    kdRelJmpMcWithFlatOrSamePageVariations = {
        'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
        'IEM_MC_REL_JMP_S16_AND_FINISH': True,
        'IEM_MC_REL_JMP_S32_AND_FINISH': True,
    };
1038
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        dState carries state across the recursion into conditional branches;
        currently only the 'IEM_MC_ASSERT_EFLAGS' key, which records the
        nesting level (iLevel) at which an IEM_MC_REF_EFLAGS was seen so an
        assertion MC can be emitted before the next ..._FINISH or when the
        reference goes out of scope.  iParamRef indexes self.aoParamRefs.
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier replacements don't shift the offsets of later ones.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (    oNewStmt.sName
                       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                           'IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH',
                           'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
                           'IEM_MC_REL_CALL_S64_AND_FINISH',
                           'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
                           'IEM_MC_IND_CALL_U64_AND_FINISH',
                           'IEM_MC_RETN_AND_FINISH',)):
                    # Most finish MCs take the instruction length; the SET_RIP ones don't advance RIP.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    # Pick the _THREADED_PCxx[_INTRAPG][_WITH_FLAGS] suffix matching the variation.
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        if (    self.sVariation not in self.kdVariationsWithSamePgConditional
                            or oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations):
                            oNewStmt.sName += '_THREADED_PC64';
                        else:
                            oNewStmt.sName += '_THREADED_PC64_INTRAPG';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        if (    self.sVariation not in self.kdVariationsWithSamePgConditional
                            or oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations):
                            oNewStmt.sName += '_THREADED_PC64_WITH_FLAGS';
                        else:
                            oNewStmt.sName += '_THREADED_PC64_INTRAPG_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_THREADED_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_THREADED_PC16_WITH_FLAGS';
                    elif oNewStmt.sName not in self.kdRelJmpMcWithFlatOrSamePageVariations:
                        if self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                            assert self.sVariation != self.ksVariation_Default;
                            oNewStmt.sName += '_THREADED_PC32';
                        else:
                            oNewStmt.sName += '_THREADED_PC32_WITH_FLAGS';
                    else:
                        if self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                            assert self.sVariation != self.ksVariation_Default;
                            oNewStmt.sName += '_THREADED_PC32_FLAT';
                        else:
                            oNewStmt.sName += '_THREADED_PC32_FLAT_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;       # cbInstr is prepended, shifting the function pointer ...
                    oNewStmt.idxParams += 1;   # ... and the parameters one position to the right.
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (    oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        # Sanity check that the parameter being dropped really is the effective segment.
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
                                        'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                        iParamRef, iLevel + 1);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                              dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1230
1231
1232 def analyzeConsolidateThreadedParamRefs(self):
1233 """
1234 Consolidate threaded function parameter references into a dictionary
1235 with lists of the references to each variable/field.
1236 """
1237 # Gather unique parameters.
1238 self.dParamRefs = {};
1239 for oRef in self.aoParamRefs:
1240 if oRef.sStdRef not in self.dParamRefs:
1241 self.dParamRefs[oRef.sStdRef] = [oRef,];
1242 else:
1243 self.dParamRefs[oRef.sStdRef].append(oRef);
1244
1245 # Generate names for them for use in the threaded function.
1246 dParamNames = {};
1247 for sName, aoRefs in self.dParamRefs.items():
1248 # Morph the reference expression into a name.
1249 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1250 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1251 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1252 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1253 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1254 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1255 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1256 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1257 else:
1258 sName += 'P';
1259
1260 # Ensure it's unique.
1261 if sName in dParamNames:
1262 for i in range(10):
1263 if sName + str(i) not in dParamNames:
1264 sName += str(i);
1265 break;
1266 dParamNames[sName] = True;
1267
1268 # Update all the references.
1269 for oRef in aoRefs:
1270 oRef.sNewName = sName;
1271
1272 # Organize them by size too for the purpose of optimize them.
1273 dBySize = {} # type: Dict[str, str]
1274 for sStdRef, aoRefs in self.dParamRefs.items():
1275 if aoRefs[0].sType[0] != 'P':
1276 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1277 assert(cBits <= 64);
1278 else:
1279 cBits = 64;
1280
1281 if cBits not in dBySize:
1282 dBySize[cBits] = [sStdRef,]
1283 else:
1284 dBySize[cBits].append(sStdRef);
1285
1286 # Pack the parameters as best as we can, starting with the largest ones
1287 # and ASSUMING a 64-bit parameter size.
1288 self.cMinParams = 0;
1289 offNewParam = 0;
1290 for cBits in sorted(dBySize.keys(), reverse = True):
1291 for sStdRef in dBySize[cBits]:
1292 if offNewParam == 0 or offNewParam + cBits > 64:
1293 self.cMinParams += 1;
1294 offNewParam = cBits;
1295 else:
1296 offNewParam += cBits;
1297 assert(offNewParam <= 64);
1298
1299 for oRef in self.dParamRefs[sStdRef]:
1300 oRef.iNewParam = self.cMinParams - 1;
1301 oRef.offNewParam = offNewParam - cBits;
1302
1303 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1304 if self.cMinParams >= 4:
1305 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1306 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1307
1308 return True;
1309
    ## All hexadecimal digit characters, lower and upper case.
    ksHexDigits = '0123456789abcdefABCDEF';
1311
1312 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1313 """
1314 Scans the statements for things that have to passed on to the threaded
1315 function (populates self.aoParamRefs).
1316 """
1317 for oStmt in aoStmts:
1318 # Some statements we can skip alltogether.
1319 if isinstance(oStmt, iai.McCppPreProc):
1320 continue;
1321 if oStmt.isCppStmt() and oStmt.fDecode:
1322 continue;
1323 if oStmt.sName in ('IEM_MC_BEGIN',):
1324 continue;
1325
1326 if isinstance(oStmt, iai.McStmtVar):
1327 if oStmt.sValue is None:
1328 continue;
1329 aiSkipParams = { 0: True, 1: True, 3: True };
1330 else:
1331 aiSkipParams = {};
1332
1333 # Several statements have implicit parameters and some have different parameters.
1334 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1335 'IEM_MC_REL_JMP_S32_AND_FINISH',
1336 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
1337 'IEM_MC_REL_CALL_S64_AND_FINISH',
1338 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
1339 'IEM_MC_IND_CALL_U64_AND_FINISH',
1340 'IEM_MC_RETN_AND_FINISH',
1341 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3',
1342 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1343 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1344 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1345 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1346
1347 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1348 and self.sVariation not in self.kdVariationsOnlyPre386):
1349 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1350
1351 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1352 # This is being pretty presumptive about bRm always being the RM byte...
1353 assert len(oStmt.asParams) == 3;
1354 assert oStmt.asParams[1] == 'bRm';
1355
1356 if self.sVariation in self.kdVariationsWithFlatAddr16:
1357 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1358 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1359 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1360 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1361 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1362 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1363 'uint8_t', oStmt, sStdRef = 'bSib'));
1364 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1365 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1366 else:
1367 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1368 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1369 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1370 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1371 'uint8_t', oStmt, sStdRef = 'bSib'));
1372 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1373 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1374 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1375 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1376 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1377
1378 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1379 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1380 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1381 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1382 aiSkipParams[idxReg] = True; # Skip the parameter below.
1383
1384 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1385 if ( self.sVariation in self.kdVariationsWithFlatAddress
1386 and oStmt.sName in self.kdMemMcToFlatInfo
1387 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1388 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1389
1390 # Inspect the target of calls to see if we need to pass down a
1391 # function pointer or function table pointer for it to work.
1392 if isinstance(oStmt, iai.McStmtCall):
1393 if oStmt.sFn[0] == 'p':
1394 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1395 elif ( oStmt.sFn[0] != 'i'
1396 and not oStmt.sFn.startswith('RT_CONCAT3')
1397 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1398 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1399 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1400 aiSkipParams[oStmt.idxFn] = True;
1401
1402 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1403 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1404 assert oStmt.idxFn == 2;
1405 aiSkipParams[0] = True;
1406
1407 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1408 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1409 aiSkipParams[0] = True;
1410
1411
1412 # Check all the parameters for bogus references.
1413 for iParam, sParam in enumerate(oStmt.asParams):
1414 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1415 # The parameter may contain a C expression, so we have to try
1416 # extract the relevant bits, i.e. variables and fields while
1417 # ignoring operators and parentheses.
1418 offParam = 0;
1419 while offParam < len(sParam):
1420 # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
1421 ch = sParam[offParam];
1422 if ch.isalpha() or ch == '_':
1423 offStart = offParam;
1424 offParam += 1;
1425 while offParam < len(sParam):
1426 ch = sParam[offParam];
1427 if not ch.isalnum() and ch != '_' and ch != '.':
1428 if ch != '-' or sParam[offParam + 1] != '>':
1429 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1430 if ( ch == '('
1431 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1432 offParam += len('(pVM)->') - 1;
1433 else:
1434 break;
1435 offParam += 1;
1436 offParam += 1;
1437 sRef = sParam[offStart : offParam];
1438
1439 # For register references, we pass the full register indexes instead as macros
1440 # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
1441 # threaded function will be more efficient if we just pass the register index
1442 # as a 4-bit param.
1443 if ( sRef.startswith('IEM_GET_MODRM')
1444 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1445 or sRef.startswith('IEM_GET_IMM8_REG') ):
1446 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1447 if sParam[offParam] != '(':
1448 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1449 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1450 if asMacroParams is None:
1451 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1452 offParam = offCloseParam + 1;
1453 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1454 oStmt, iParam, offStart));
1455
1456 # We can skip known variables.
1457 elif sRef in self.oParent.dVariables:
1458 pass;
1459
1460 # Skip certain macro invocations.
1461 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1462 'IEM_GET_GUEST_CPU_FEATURES',
1463 'IEM_IS_GUEST_CPU_AMD',
1464 'IEM_IS_16BIT_CODE',
1465 'IEM_IS_32BIT_CODE',
1466 'IEM_IS_64BIT_CODE',
1467 ):
1468 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1469 if sParam[offParam] != '(':
1470 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1471 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1472 if asMacroParams is None:
1473 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1474 offParam = offCloseParam + 1;
1475
1476 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1477 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1478 'IEM_IS_16BIT_CODE',
1479 'IEM_IS_32BIT_CODE',
1480 'IEM_IS_64BIT_CODE',
1481 ):
1482 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1483 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1484 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1485 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1486 offParam += 1;
1487
1488 # Skip constants, globals, types (casts), sizeof and macros.
1489 elif ( sRef.startswith('IEM_OP_PRF_')
1490 or sRef.startswith('IEM_ACCESS_')
1491 or sRef.startswith('IEMINT_')
1492 or sRef.startswith('X86_GREG_')
1493 or sRef.startswith('X86_SREG_')
1494 or sRef.startswith('X86_EFL_')
1495 or sRef.startswith('X86_FSW_')
1496 or sRef.startswith('X86_FCW_')
1497 or sRef.startswith('X86_XCPT_')
1498 or sRef.startswith('IEMMODE_')
1499 or sRef.startswith('IEM_F_')
1500 or sRef.startswith('IEM_CIMPL_F_')
1501 or sRef.startswith('g_')
1502 or sRef.startswith('iemAImpl_')
1503 or sRef.startswith('kIemNativeGstReg_')
1504 or sRef.startswith('RT_ARCH_VAL_')
1505 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1506 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1507 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1508 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1509 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1510 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1511 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1512 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1513 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1514 'NIL_RTGCPTR',) ):
1515 pass;
1516
1517 # Skip certain macro invocations.
1518 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1519 elif ( ( '.' not in sRef
1520 and '-' not in sRef
1521 and sRef not in ('pVCpu', ) )
1522 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1523 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1524 oStmt, iParam, offStart));
1525 # Number.
1526 elif ch.isdigit():
1527 if ( ch == '0'
1528 and offParam + 2 <= len(sParam)
1529 and sParam[offParam + 1] in 'xX'
1530 and sParam[offParam + 2] in self.ksHexDigits ):
1531 offParam += 2;
1532 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1533 offParam += 1;
1534 else:
1535 while offParam < len(sParam) and sParam[offParam].isdigit():
1536 offParam += 1;
1537 # Comment?
1538 elif ( ch == '/'
1539 and offParam + 4 <= len(sParam)
1540 and sParam[offParam + 1] == '*'):
1541 offParam += 2;
1542 offNext = sParam.find('*/', offParam);
1543 if offNext < offParam:
1544 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1545 offParam = offNext + 2;
1546 # Whatever else.
1547 else:
1548 offParam += 1;
1549
1550 # Traverse the branches of conditionals.
1551 if isinstance(oStmt, iai.McStmtCond):
1552 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1553 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1554 return True;
1555
1556 def analyzeVariation(self, aoStmts):
1557 """
1558 2nd part of the analysis, done on each variation.
1559
1560 The variations may differ in parameter requirements and will end up with
1561 slightly different MC sequences. Thus this is done on each individually.
1562
1563 Returns dummy True - raises exception on trouble.
1564 """
1565 # Now scan the code for variables and field references that needs to
1566 # be passed to the threaded function because they are related to the
1567 # instruction decoding.
1568 self.analyzeFindThreadedParamRefs(aoStmts);
1569 self.analyzeConsolidateThreadedParamRefs();
1570
1571 # Morph the statement stream for the block into what we'll be using in the threaded function.
1572 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1573 if iParamRef != len(self.aoParamRefs):
1574 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1575
1576 return True;
1577
    def emitThreadedCallStmtsForVariant(self, cchIndent, fTbLookupTable = False, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call. This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of iai.McCpp* statement objects.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        # Each 64-bit parameter is assembled by OR'ing together the (shifted)
        # original references that were packed into it by the consolidation step.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        # Pick the emit macro: plain call, or call with a TB lookup table
        # (small '0' or large '1' table, see fTbLookupTable docs above).
        if fTbLookupTable is False:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,),
                                         asCallArgs, cchIndent = cchIndent));
        else:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_%s' % (len(asCallArgs) - 1,),
                                         ['0' if fTbLookupTable is True else '1',] + asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags      = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        # Conditional-no-jump variations fall through, so they don't count as branched.
        if (    asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        # Force an IRQ check on the next instruction when requested.
        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1661
1662
1663class ThreadedFunction(object):
1664 """
1665 A threaded function.
1666 """
1667
1668 def __init__(self, oMcBlock: iai.McBlock) -> None:
1669 self.oMcBlock = oMcBlock # type: iai.McBlock
1670 # The remaining fields are only useful after analyze() has been called:
1671 ## Variations for this block. There is at least one.
1672 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1673 ## Variation dictionary containing the same as aoVariations.
1674 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1675 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1676 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1677 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1678 ## and those determined by analyzeCodeOperation().
1679 self.dsCImplFlags = {} # type: Dict[str, bool]
1680 ## The unique sub-name for this threaded function.
1681 self.sSubName = '';
1682 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1683 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1684
1685 @staticmethod
1686 def dummyInstance():
1687 """ Gets a dummy instance. """
1688 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1689 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1690
1691 def hasWithFlagsCheckingAndClearingVariation(self):
1692 """
1693 Check if there is one or more with flags checking and clearing
1694 variations for this threaded function.
1695 """
1696 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1697 if sVarWithFlags in self.dVariations:
1698 return True;
1699 return False;
1700
1701 #
1702 # Analysis and code morphing.
1703 #
1704
1705 def raiseProblem(self, sMessage):
1706 """ Raises a problem. """
1707 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1708
1709 def error(self, sMessage, oGenerator):
1710 """ Emits an error via the generator object, causing it to fail. """
1711 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1712
1713 def warning(self, sMessage):
1714 """ Emits a warning. """
1715 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1716
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps IEM_MC_FETCH/STORE/MEM_MAP statement names to an operand-size name
    ## suffix ('a' = atomic, 'c' = constant, 'sx'/'zx' = sign/zero extending).
    kdAnnotateNameMemStmts = {
        # Fetches:
        'IEM_MC_FETCH_MEM16_U8': '__mem8',
        'IEM_MC_FETCH_MEM32_U8': '__mem8',
        'IEM_MC_FETCH_MEM_D80': '__mem80',
        'IEM_MC_FETCH_MEM_I16': '__mem16',
        'IEM_MC_FETCH_MEM_I32': '__mem32',
        'IEM_MC_FETCH_MEM_I64': '__mem64',
        'IEM_MC_FETCH_MEM_R32': '__mem32',
        'IEM_MC_FETCH_MEM_R64': '__mem64',
        'IEM_MC_FETCH_MEM_R80': '__mem80',
        'IEM_MC_FETCH_MEM_U128': '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_U16': '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256': '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_FETCH_MEM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
        'IEM_MC_FETCH_MEM_U8': '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        # Stores:
        'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_U128': '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_STORE_MEM_U16': '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
        'IEM_MC_STORE_MEM_U256': '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_STORE_MEM_U32': '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
        'IEM_MC_STORE_MEM_U64': '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
        'IEM_MC_STORE_MEM_U8': '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',

        # Mappings (direct read/write/atomic access):
        'IEM_MC_MEM_MAP_D80_WO': '__mem80',
        'IEM_MC_MEM_MAP_I16_WO': '__mem16',
        'IEM_MC_MEM_MAP_I32_WO': '__mem32',
        'IEM_MC_MEM_MAP_I64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R32_WO': '__mem32',
        'IEM_MC_MEM_MAP_R64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R80_WO': '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO': '__mem128',
        'IEM_MC_MEM_MAP_U128_RW': '__mem128',
        'IEM_MC_MEM_MAP_U128_WO': '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO': '__mem16',
        'IEM_MC_MEM_MAP_U16_RW': '__mem16',
        'IEM_MC_MEM_MAP_U16_WO': '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO': '__mem32',
        'IEM_MC_MEM_MAP_U32_RW': '__mem32',
        'IEM_MC_MEM_MAP_U32_WO': '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO': '__mem64',
        'IEM_MC_MEM_MAP_U64_RW': '__mem64',
        'IEM_MC_MEM_MAP_U64_WO': '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO': '__mem8',
        'IEM_MC_MEM_MAP_U8_RW': '__mem8',
        'IEM_MC_MEM_MAP_U8_WO': '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Maps register access statement names to a register-class + size name
    ## suffix (greg/sreg/fpu/mreg/xreg/yreg; 'c' = constant ref, 'sx'/'zx' = extend).
    kdAnnotateNameRegStmts = {
        # General purpose registers:
        'IEM_MC_FETCH_GREG_U8': '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16': '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32': '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64': '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',

        'IEM_MC_STORE_GREG_U8': '__greg8',
        'IEM_MC_STORE_GREG_U16': '__greg16',
        'IEM_MC_STORE_GREG_U32': '__greg32',
        'IEM_MC_STORE_GREG_U64': '__greg64',
        'IEM_MC_STORE_GREG_I64': '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',

        # Segment registers:
        'IEM_MC_FETCH_SREG_U16': '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',

        'IEM_MC_REF_GREG_U8': '__greg8',
        'IEM_MC_REF_GREG_U16': '__greg16',
        'IEM_MC_REF_GREG_U32': '__greg32',
        'IEM_MC_REF_GREG_U64': '__greg64',
        'IEM_MC_REF_GREG_U8_CONST': '__greg8',
        'IEM_MC_REF_GREG_U16_CONST': '__greg16',
        'IEM_MC_REF_GREG_U32_CONST': '__greg32',
        'IEM_MC_REF_GREG_U64_CONST': '__greg64',
        'IEM_MC_REF_GREG_I32': '__greg32',
        'IEM_MC_REF_GREG_I64': '__greg64',
        'IEM_MC_REF_GREG_I32_CONST': '__greg32',
        'IEM_MC_REF_GREG_I64_CONST': '__greg64',

        # FPU registers:
        'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
        'IEM_MC_REF_FPUREG': '__fpu',

        # MMX registers:
        'IEM_MC_FETCH_MREG_U64': '__mreg64',
        'IEM_MC_FETCH_MREG_U32': '__mreg32',
        'IEM_MC_FETCH_MREG_U16': '__mreg16',
        'IEM_MC_FETCH_MREG_U8': '__mreg8',
        'IEM_MC_STORE_MREG_U64': '__mreg64',
        'IEM_MC_STORE_MREG_U32': '__mreg32',
        'IEM_MC_STORE_MREG_U16': '__mreg16',
        'IEM_MC_STORE_MREG_U8': '__mreg8',
        'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
        'IEM_MC_REF_MREG_U64': '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST': '__mreg32',

        # SSE (XMM) registers:
        'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128': '__xreg128',
        'IEM_MC_FETCH_XREG_XMM': '__xreg128',
        'IEM_MC_FETCH_XREG_U64': '__xreg64',
        'IEM_MC_FETCH_XREG_U32': '__xreg32',
        'IEM_MC_FETCH_XREG_U16': '__xreg16',
        'IEM_MC_FETCH_XREG_U8': '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
        'IEM_MC_STORE_XREG_U128': '__xreg128',
        'IEM_MC_STORE_XREG_XMM': '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32': '__xreg32',
        'IEM_MC_STORE_XREG_U16': '__xreg16',
        'IEM_MC_STORE_XREG_U8': '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
        'IEM_MC_STORE_XREG_R32': '__xreg32',
        'IEM_MC_STORE_XREG_R64': '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
        'IEM_MC_REF_XREG_U128': '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
        'IEM_MC_COPY_XREG_U128': '__xreg128',

        # AVX (YMM) registers:
        'IEM_MC_FETCH_YREG_U256': '__yreg256',
        'IEM_MC_FETCH_YREG_YMM': '__yreg256',
        'IEM_MC_FETCH_YREG_U128': '__yreg128',
        'IEM_MC_FETCH_YREG_U64': '__yreg64',
        'IEM_MC_FETCH_YREG_U32': '__yreg32',
        'IEM_MC_STORE_YREG_U128': '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
        'IEM_MC_REF_YREG_U128': '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
    ## Used by analyzeAndAnnotateName to describe the call type when no operand
    ## details were found; also appended (sans leading '_') to operand names.
    kdAnnotateNameCallStmts = {
        'IEM_MC_CALL_CIMPL_0': '__cimpl',
        'IEM_MC_CALL_CIMPL_1': '__cimpl',
        'IEM_MC_CALL_CIMPL_2': '__cimpl',
        'IEM_MC_CALL_CIMPL_3': '__cimpl',
        'IEM_MC_CALL_CIMPL_4': '__cimpl',
        'IEM_MC_CALL_CIMPL_5': '__cimpl',
        'IEM_MC_CALL_CIMPL_6': '__cimpl',
        'IEM_MC_CALL_CIMPL_7': '__cimpl',
        'IEM_MC_DEFER_TO_CIMPL_0_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_1_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_2_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_3_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_4_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_5_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_6_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_7_RET': '__cimpl_defer',
        'IEM_MC_CALL_VOID_AIMPL_0': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_1': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_2': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_3': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_4': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_5': '__aimpl',
        'IEM_MC_CALL_AIMPL_0': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_1': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_2': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_3': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_4': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_5': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_6': '__aimpl_ret',
        'IEM_MC_CALL_VOID_AIMPL_6': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_0': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_1': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_2': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_3': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_4': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_5': '__aimpl_fpu',
        'IEM_MC_CALL_MMX_AIMPL_0': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_1': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_2': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_3': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_4': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_5': '__aimpl_mmx',
        'IEM_MC_CALL_SSE_AIMPL_0': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_1': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_2': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_3': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_4': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_5': '__aimpl_sse',
        'IEM_MC_CALL_AVX_AIMPL_0': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_1': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_2': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_3': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_4': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_5': '__aimpl_avx',
    };
    def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
        """
        Scans the statements and variation lists for clues about the threaded function,
        and sets self.sSubName if successful.

        Priority: memory operands, then register operands, then bare call type.
        Leaves self.sSubName untouched if no clue at all is found.
        """
        # Operand base naming:
        dHits = {};
        cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
        if cHits > 0:
            sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
            sName = self.kdAnnotateNameMemStmts[sStmtNm];
        else:
            cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
            if cHits > 0:
                sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
                sName = self.kdAnnotateNameRegStmts[sStmtNm];
            else:
                # No op details, try name it by call type...
                # Note: returns here (no operand base name to append to).
                cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
                if cHits > 0:
                    sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
                    self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
                return;

        # Add call info if any:
        dHits = {};
        cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
        if cHits > 0:
            sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
            sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];

        self.sSubName = sName;
        return;
2045
2046 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
2047 """ Scans the statements for MC variables and call arguments. """
2048 for oStmt in aoStmts:
2049 if isinstance(oStmt, iai.McStmtVar):
2050 if oStmt.sVarName in self.dVariables:
2051 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
2052 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
2053 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
2054 if oStmt.asParams[1] in self.dVariables:
2055 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
2056 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
2057 oStmt.asParams[0], oStmt.asParams[1]);
2058
2059 # There shouldn't be any variables or arguments declared inside if/
2060 # else blocks, but scan them too to be on the safe side.
2061 if isinstance(oStmt, iai.McStmtCond):
2062 #cBefore = len(self.dVariables);
2063 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
2064 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
2065 #if len(self.dVariables) != cBefore:
2066 # raise Exception('Variables/arguments defined in conditional branches!');
2067 return True;
2068
    ## Maps block-terminating MC statements to a g_ksFinishAnnotation_XXX value
    ## describing how the generated function finishes; used by analyzeCodeOperation.
    kdReturnStmtAnnotations = {
        'IEM_MC_ADVANCE_RIP_AND_FINISH': g_ksFinishAnnotation_Advance,
        'IEM_MC_REL_JMP_S8_AND_FINISH': g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S16_AND_FINISH': g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S32_AND_FINISH': g_ksFinishAnnotation_RelJmp,
        'IEM_MC_SET_RIP_U16_AND_FINISH': g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U32_AND_FINISH': g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U64_AND_FINISH': g_ksFinishAnnotation_SetJmp,
        'IEM_MC_REL_CALL_S16_AND_FINISH': g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S32_AND_FINISH': g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S64_AND_FINISH': g_ksFinishAnnotation_RelCall,
        'IEM_MC_IND_CALL_U16_AND_FINISH': g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U32_AND_FINISH': g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U64_AND_FINISH': g_ksFinishAnnotation_IndCall,
        'IEM_MC_DEFER_TO_CIMPL_0_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_1_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_2_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_3_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_4_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_5_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_6_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_7_RET': g_ksFinishAnnotation_DeferToCImpl,
    };
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> 'Optional[str]':
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        dEflStmts is an in/out dictionary collecting EFLAGS-related statements
        (keyed by statement name) for the caller to inspect.

        Returns annotation on return style (a g_ksFinishAnnotation_XXX string),
        or None when no return statement was seen in aoStmts.
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH_XXXX flags if we see any branching MCs.
            # Unconditional control transfers must not appear inside IEM_MC_IF_* branches.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
            elif oStmt.sName.startswith('IEM_MC_IND_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK']    = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB']          = True;
            elif oStmt.sName.startswith('IEM_MC_REL_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK']    = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB']          = True;
            elif oStmt.sName.startswith('IEM_MC_RETN'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK']    = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB']          = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.  At most one per statement list.
            if oStmt.sName in self.kdReturnStmtAnnotations:
                assert sAnnotation is None;
                sAnnotation = self.kdReturnStmtAnnotations[oStmt.sName];

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
                               'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls that advertise flag modification also count.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
2169
    def analyzeThreadedFunction(self, oGenerator):
        """
        Analyzes the code, identifying the number of parameters it requires and such.

        Pipeline: decodes the MC block, validates it, collects variables and
        call arguments, derives IEM_CIMPL_F_XXX flags (self.dsCImplFlags),
        sanity checks the EFLAGS related doxygen annotations against the MCs
        actually used, and finally instantiates the applicable
        ThreadedFunctionVariation objects (self.aoVariations/self.dVariations).

        oGenerator is used for error reporting only (see self.error calls).

        Returns dummy True - raises exception on trouble.
        """

        #
        # Decode the block into a list/tree of McStmt objects.
        #
        aoStmts = self.oMcBlock.decode();

        #
        # Check the block for errors before we proceed (will decode it).
        #
        asErrors = self.oMcBlock.check();
        if asErrors:
            raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
                                       for sError in asErrors]));

        #
        # Scan the statements for local variables and call arguments (self.dVariables).
        #
        self.analyzeFindVariablesAndCallArgs(aoStmts);

        #
        # Scan the code for IEM_CIMPL_F_ and other clues.
        #
        self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
        dEflStmts = {};
        self.analyzeCodeOperation(aoStmts, dEflStmts);
        # At most one of the IEM_CIMPL_F_CALLS_XXX flags may be set by a block.
        if (   ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
             + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
            self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);

        #
        # Analyse EFLAGS related MCs and @opflmodify and friends.
        #
        if dEflStmts:
            oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
            if (   oInstruction is None
                or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
                sMcNames = '+'.join(dEflStmts.keys());
                if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
                    self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
            elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
                if not oInstruction.asFlModify:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
            elif (   'IEM_MC_CALL_CIMPL_0' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
                  or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
                if not oInstruction.asFlModify:
                    self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
                               'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
            elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
                if not oInstruction.asFlTest:
                    if oInstruction.sMnemonic not in [ 'not', ]:
                        self.error('Expected @opfltest!', oGenerator);
            # @opflset and @opflclear entries must also be present in @opflmodify.
            if oInstruction and oInstruction.asFlSet:
                for sFlag in oInstruction.asFlSet:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
            if oInstruction and oInstruction.asFlClear:
                for sFlag in oInstruction.asFlClear:
                    if sFlag not in oInstruction.asFlModify:
                        self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
                                   % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);

        #
        # Create variations as needed.
        #
        # Pure defer-to-CIMPL blocks get just the default variation.
        if iai.McStmt.findStmtByNames(aoStmts,
                                      { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
                                        'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
            asVariations = (ThreadedFunctionVariation.ksVariation_Default,);

        # Memory accessing blocks get the with-address variations; the IEM_MC_F_XXX
        # flags narrow down which CPU modes apply.
        elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
                                                   'IEM_MC_FETCH_MEM_U8'     : True, # mov_AL_Ob ++
                                                   'IEM_MC_FETCH_MEM_U16'    : True, # mov_rAX_Ov ++
                                                   'IEM_MC_FETCH_MEM_U32'    : True,
                                                   'IEM_MC_FETCH_MEM_U64'    : True,
                                                   'IEM_MC_STORE_MEM_U8'     : True, # mov_Ob_AL ++
                                                   'IEM_MC_STORE_MEM_U16'    : True, # mov_Ov_rAX ++
                                                   'IEM_MC_STORE_MEM_U32'    : True,
                                                   'IEM_MC_STORE_MEM_U64'    : True, }):
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
        else:
            if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
            elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
            elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
            elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
                asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
            else:
                asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;

        # Conditional relative branches get the whole set doubled into
        # _Jmp/_NoJmp variations (one for each outcome of the condition).
        if (    'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
            and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
            assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
                   '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
            asVariationsBase = asVariations;
            asVariations     = [];
            for sVariation in asVariationsBase:
                asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
            assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);

            # We've got some Flat variations we need to add manually to avoid unnecessary CS.LIM checks.
            if ThrdFnVar.ksVariation_32 in asVariationsBase:
                assert ThrdFnVar.ksVariation_32f in asVariationsBase;
                asVariations.extend([
                    ThrdFnVar.ksVariation_32_Flat_Jmp,
                    ThrdFnVar.ksVariation_32_Flat_NoJmp,
                    ThrdFnVar.ksVariation_32f_Flat_Jmp,
                    ThrdFnVar.ksVariation_32f_Flat_NoJmp,
                ]);

            # Similarly, if there are 64-bit variants, we need the within same page variations.
            # We skip this when the operand size prefix is used (forced 16-bit RIP, i.e.
            # IEM_MC_REL_JMP_S16_AND_FINISH) because the same-page assumptions are most
            # likely wrong then.
            if (   ThrdFnVar.ksVariation_64 in asVariationsBase
                and not iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_REL_JMP_S16_AND_FINISH': True })):
                assert ThrdFnVar.ksVariation_64f in asVariationsBase;
                asVariations.extend([
                    ThrdFnVar.ksVariation_64_SamePg_Jmp,
                    ThrdFnVar.ksVariation_64_SamePg_NoJmp,
                    ThrdFnVar.ksVariation_64f_SamePg_Jmp,
                    ThrdFnVar.ksVariation_64f_SamePg_NoJmp,
                ]);

        # Blocks without any RIP-advancing/branching MCs do not need the
        # eflags-checking-and-clearing variations; filter those out.
        if not iai.McStmt.findStmtByNames(aoStmts,
                                          { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                            'IEM_MC_REL_JMP_S32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U16_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U32_AND_FINISH': True,
                                            'IEM_MC_SET_RIP_U64_AND_FINISH': True,
                                            'IEM_MC_REL_CALL_S16_AND_FINISH': True,
                                            'IEM_MC_REL_CALL_S32_AND_FINISH': True,
                                            'IEM_MC_REL_CALL_S64_AND_FINISH': True,
                                            'IEM_MC_IND_CALL_U16_AND_FINISH': True,
                                            'IEM_MC_IND_CALL_U32_AND_FINISH': True,
                                            'IEM_MC_IND_CALL_U64_AND_FINISH': True,
                                            'IEM_MC_RETN_AND_FINISH': True,
                                          }):
            asVariations = [sVariation for sVariation in asVariations
                            if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];

        self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];

        # Dictionary variant of the list.
        self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };

        #
        # Try annotate the threaded function name.
        #
        self.analyzeAndAnnotateName(aoStmts);

        #
        # Continue the analysis on each variation.
        #
        for oVariation in self.aoVariations:
            oVariation.analyzeVariation(aoStmts);

        return True;
2359
    ## Used by emitThreadedCallStmts.
    ## Variations whose selection depends on address-size / segment prefixes;
    ## when any of these are present, emitThreadedCallStmts adds the
    ## enmEffAddrMode (bit 8) and iEffSeg (bit 16) terms to the switch value.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2373
    def emitThreadedCallStmts(self, sBranch = None, fTbLookupTable = False): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.  Values are either None, 'Jmp' or 'NoJmp'.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        The emitted C code is a switch on the current execution mode (plus
        prefix/eflags penalty bits, see below) that dispatches to the matching
        ThreadedFunctionVariation.  If all case bodies turn out identical
        except for the function index, a compact 'enmFunction' assignment
        switch is emitted instead of duplicating the bodies.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmtsForVariant(0, fTbLookupTable);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOp_jnl_Jv';
        class Case:
            """ One 'case' of the variation-selection switch; sVarNm=None makes a fall-thru case. """
            def __init__(self, sCond, sVarNm = None, sIntraPgVarNm = None, sIntraPgDispVariable = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;
                # Some annoying complications just to skip canonical jump target checks for intrapage jumps.
                self.sIntraPgDispVariable = sIntraPgDispVariable;
                self.oIntraPgVar          = dByVari[sIntraPgVarNm] if sIntraPgVarNm else None;
                self.aoIntraPgBody        = self.oIntraPgVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sIntraPgVarNm \
                                            else None;

            def toCode(self):
                """ Returns the case label followed by the full threaded call body. """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    if not self.aoIntraPgBody:
                        aoStmts.extend(self.aoBody);
                        aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                    else:
                        # Pick the regular or intra-page body at runtime depending on the displacement.
                        aoStmts.extend([
                            iai.McCppCond('!IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(%s)' % (self.sIntraPgDispVariable,),
                                          True, self.aoBody, self.aoIntraPgBody, cchIndent = 8),
                            iai.McCppGeneric('break;', cchIndent = 8),
                        ]);
                return aoStmts;

            def toFunctionAssignment(self):
                """ Returns the case label with only an enmFunction assignment (shared-body switch). """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    if not self.aoIntraPgBody:
                        aoStmts.extend([
                            iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                            iai.McCppGeneric('break;', cchIndent = 8),
                        ]);
                    else:
                        aoStmts.extend([
                            iai.McCppGeneric('enmFunction = !IEMOP_HLP_PC64_IS_JMP_REL_WITHIN_PAGE(%s) ? %s : %s;'
                                             % (self.sIntraPgDispVariable, self.oVar.getIndexName(),
                                                self.oIntraPgVar.getIndexName(),), cchIndent = 8),
                            iai.McCppGeneric('break;', cchIndent = 8),
                        ]);
                return aoStmts;

            @staticmethod
            def isSameBody(aoThisBody, sThisIndexName, aoThatBody, sThatIndexName, sBody = ''):
                """
                Compares two case bodies statement by statement, treating the
                threaded function index parameter of IEM_MC2_EMIT_CALL_ calls
                as a wildcard (it is supplied via enmFunction when merged).
                """
                if len(aoThisBody) != len(aoThatBody):
                    #if fDbg: print('dbg: %sbody len diff: %s vs %s' % (sBody, len(aoThisBody), len(aoThatBody),));
                    return False;
                for iStmt, oStmt in enumerate(aoThisBody):
                    oThatStmt = aoThatBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond):
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: %sstmt #%s name: %s vs %s' % (sBody, iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: %sstmt #%s param count: %s vs %s'
                        #               % (sBody, iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam not in (1, 2)
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != sThisIndexName
                                 or oThatStmt.asParams[iParam] != sThatIndexName )):
                            #if fDbg: print('dbg: %sstmt #%s, param #%s: %s vs %s'
                            #               % (sBody, iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

            def isSame(self, oThat):
                """ Checks if this case has the same body as oThat (modulo the function index). """
                if self.aoBody: # no body == fall thru - that always matches.
                    if not self.isSameBody(self.aoBody, self.oVar.getIndexName(),
                                           oThat.aoBody, oThat.oVar.getIndexName()):
                        return False;
                    if self.aoIntraPgBody and not self.isSameBody(self.aoIntraPgBody, self.oIntraPgVar.getIndexName(),
                                                                  oThat.aoBody, oThat.oVar.getIndexName(),
                                                                  'intrapg/left '):
                        return False;
                    if oThat.aoIntraPgBody and not self.isSameBody(self.aoBody, self.oVar.getIndexName(),
                                                                   oThat.aoIntraPgBody, oThat.oIntraPgVar.getIndexName(),
                                                                   'intrapg/right '):
                        return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        # Extra bits added on top: 8 = effective address size differs from the
        # CPU mode, 16 = effective segment is FS/GS/CS, 32 = pending eflags /
        # breakpoint check needed.
        #
        fSimple = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            if ThrdFnVar.ksVariation_64_SamePg_Jmp not in dByVari:
                assert ThrdFnVar.ksVariation_64f_Jmp in dByVari;
                aoCases.extend([
                    Case('IEMMODE_64BIT',
                         ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp),
                    Case('IEMMODE_64BIT | 32',
                         ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp),
                ]);
            else:
                # SamePg variants present: locate the relative jump statement so
                # the displacement variable can be tested at runtime.
                assert ThrdFnVar.ksVariation_64f_SamePg_Jmp in dByVari;
                oStmtRelJmp = iai.McStmt.findStmtByNames(self.oMcBlock.decode(),
                                                         { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                           'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                           'IEM_MC_REL_JMP_S32_AND_FINISH': True,});
                sIntraPgDispVariable = oStmtRelJmp.asParams[0];
                aoCases.extend([
                    Case('IEMMODE_64BIT',
                         ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp,
                         ThrdFnVar.ksVariation_64_SamePg_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_SamePg_NoJmp,
                         sIntraPgDispVariable),
                    Case('IEMMODE_64BIT | 32',
                         ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp,
                         ThrdFnVar.ksVariation_64f_SamePg_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_SamePg_NoJmp,
                         sIntraPgDispVariable),
                ]);


        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                     ThrdFnVar.ksVariation_32_Flat_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_Flat_NoJmp),
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                         ThrdFnVar.ksVariation_32f_Flat_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_Flat_NoJmp),
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
            if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
                assert fSimple and sBranch;
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                    ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                    else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # Compact form: switch only picks the function, one shared body follows.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmtsForVariant(0, fTbLookupTable,
                                                                                                       'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
2696
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        The threaded call is inserted just before the first statement that
        raises, finishes or calls a C implementation (or, for C++ statements,
        right after the IEMOP_HLP_DONE_/IEMOP_HLP_DECODED_ macros).  Branches
        of conditionals are processed recursively (cDepth > 0).

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Patch the collected CIMPL flags into the IEM_MC_BEGIN invocation.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO',)):
                        # Insert the call before this statement (pop + re-append below).
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        else:
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    # C++ decoding-done macro: emit the call right after it.
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # The call counts as emitted only if both branches emitted it.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2782
2783
2784 def generateInputCode(self):
2785 """
2786 Modifies the input code.
2787 """
2788 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2789
2790 if len(self.oMcBlock.aoStmts) == 1:
2791 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2792 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2793 if self.dsCImplFlags:
2794 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2795 else:
2796 sCode += '0;\n';
2797 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2798 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2799 sIndent = ' ' * (min(cchIndent, 2) - 2);
2800 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2801 return sCode;
2802
2803 # IEM_MC_BEGIN/END block
2804 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2805 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2806 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2807 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2808 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2809
# Short alias for ThreadedFunctionVariation, used to keep the long variation
# constant references above/below within line-length limits.
ThrdFnVar = ThreadedFunctionVariation;
2812
2813
2814class IEMThreadedGenerator(object):
2815 """
2816 The threaded code generator & annotator.
2817 """
2818
2819 def __init__(self):
2820 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2821 self.oOptions = None # type: argparse.Namespace
2822 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2823 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2824 self.cErrors = 0;
2825
2826 #
2827 # Error reporting.
2828 #
2829
2830 def rawError(self, sCompleteMessage):
2831 """ Output a raw error and increment the error counter. """
2832 print(sCompleteMessage, file = sys.stderr);
2833 self.cErrors += 1;
2834 return False;
2835
2836 #
2837 # Processing.
2838 #
2839
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        sHostArch                - host architecture string, passed thru to the
                                   instruction parser (and native analysis).
        fNativeRecompilerEnabled - whether to also run the native recompiler
                                   analysis pass over the threaded functions.

        Returns False if any errors were raised during processing, else True.
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyzeThreadedFunction(self);
            # Gather parameter count statistics (raw reference count vs the
            # minimum after packing) for the debug output below.
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # For each run of functions sharing the same oFunction: if two or more of
        # them ended up with the same sSubName, disambiguate the whole run by
        # appending a sequential '_<n>' suffix.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while (    iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            # Fewer distinct sub names than functions in the run => clashes.
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs         = 0;
        cMaxVars         = 0;
        cMaxVarsAndArgs  = 0;
        cbMaxArgs        = 0;
        cbMaxVars        = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
                # Counts.
                cMaxVars        = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
                cMaxArgs        = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
                cMaxVarsAndArgs = max(cMaxVarsAndArgs,
                                      len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
                # NOTE(review): this fires at 10 although the message says
                # 'max 10' - confirm whether the limit is 9 or '> 10' was meant.
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
                # Calc stack allocation size: each variable/argument is rounded
                # up to a whole number of 8-byte stack slots.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars        = max(cbMaxVars, cbVars);
                cbMaxArgs        = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2939
2940 #
2941 # Output
2942 #
2943
    def generateLicenseHeader(self):
        """
        Returns the lines for a license header.

        Note! The generated files are dual licensed (GPL-3.0-only OR CDDL-1.0),
              unlike this generator script itself (GPL-3.0-only).
        """
        return [
            '/*',
            ' * Autogenerated by $Id: IEMAllThrdPython.py 106179 2024-09-29 01:14:19Z vboxsync $ ',
            ' * Do not edit!',
            ' */',
            '',
            '/*',
            ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
            ' *',
            ' * This file is part of VirtualBox base platform packages, as',
            ' * available from https://www.virtualbox.org.',
            ' *',
            ' * This program is free software; you can redistribute it and/or',
            ' * modify it under the terms of the GNU General Public License',
            ' * as published by the Free Software Foundation, in version 3 of the',
            ' * License.',
            ' *',
            ' * This program is distributed in the hope that it will be useful, but',
            ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
            ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
            ' * General Public License for more details.',
            ' *',
            ' * You should have received a copy of the GNU General Public License',
            ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
            ' *',
            ' * The contents of this file may alternatively be used under the terms',
            ' * of the Common Development and Distribution License Version 1.0',
            ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
            ' * in the VirtualBox distribution, in which case the provisions of the',
            ' * CDDL are applicable instead of those of the GPL.',
            ' *',
            ' * You may elect to license modified versions of this file under the',
            ' * terms and conditions of either the GPL or the CDDL or both.',
            ' *',
            ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
            ' */',
            '',
            '',
            '',
        ];
2988
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry: (name suffix, number of used arguments, has native recompiler function).
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckTimers', 0, True ),
        ( 'CheckTimersAndIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'Jump', 1, True ),
    );
3029
    def generateThreadedFunctionsHeader(self, oOut, _):
        """
        Generates the threaded functions header file.

        As a side effect, assigns the iEnumValue member of every emitted
        variation (cross-checked later by generateThreadedFunctionsSource).

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            ' kIemThreadedFunc_Invalid = 0,',
            '',
            ' /*',
            ' * Predefined',
            ' */',
        ];
        asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # The enum entries for the variations follow the built-in ones.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                ' /*',
                ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                ' */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append(' ' + oVariation.getIndexName() + ',');
        asLines += [
            ' kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            '#if defined(IN_RING3)',
            'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
            '#endif',
        ];

        oOut.write('\n'.join(asLines));
        return True;
3084
    ## Maps a parameter bit width to the C mask expression used for extracting
    ## it from a packed 64-bit parameter (see generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
3093
    def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
        """
        Outputs code for unpacking parameters.
        This is shared by the threaded and native code generators.

        oVariation  - The variation whose dParamRefs to unpack.
        oOut        - Output stream (only write() is used).
        asParams    - C expressions for the packed 64-bit parameters.
        uNoRefLevel - 0: no RT_NOREF_PV lines; 1: RT_NOREF_PV for pfnXxxx
                      variables only (native); 2: for everything (liveness).

        Returns True.
        """
        aasVars = [];
        for aoRefs in oVariation.dParamRefs.values():
            oRef  = aoRefs[0];
            # Pointer types ('P...') always occupy a full 64-bit parameter;
            # other types have their width looked up so several can share one.
            if oRef.sType[0] != 'P':
                cBits = g_kdTypeInfo[oRef.sType][0];
                sType = g_kdTypeInfo[oRef.sType][2];
            else:
                cBits = 64;
                sType = oRef.sType;

            sTypeDecl = sType + ' const';

            if cBits == 64:
                assert oRef.offNewParam == 0;
                if sType == 'uint64_t':
                    sUnpack = '%s;' % (asParams[oRef.iNewParam],);
                else:
                    sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
            elif oRef.offNewParam == 0:
                sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
            else:
                sUnpack = '(%s)((%s >> %s) & %s);' \
                        % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);

            sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);

            # Column 0 (param:offset) is only used as the sort key below.
            aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
                             sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
        # Emit the declarations sorted by parameter/offset with aligned columns.
        acchVars = [0, 0, 0, 0, 0];
        for asVar in aasVars:
            for iCol, sStr in enumerate(asVar):
                acchVars[iCol] = max(acchVars[iCol], len(sStr));
        sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
        for asVar in sorted(aasVars):
            oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));

        if uNoRefLevel > 0 and aasVars:
            if uNoRefLevel > 1:
                # level 2: Everything. This is used by liveness.
                oOut.write(' ');
                for asVar in sorted(aasVars):
                    oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
                oOut.write('\n');
            else:
                # level 1: Only pfnXxxx variables. This is used by native.
                for asVar in sorted(aasVars):
                    if asVar[2].startswith('pfn'):
                        oOut.write(' RT_NOREF_PV(%s);\n' % (asVar[2],));
        return True;
3148
    ## Names of the generic threaded function parameters (also used for RT_NOREF'ing unused ones).
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut, _):
        """
        Generates the threaded functions source file.

        Emits the function definitions for every variation, followed by the
        function pointer, argument count, name and statistics tables.

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big box banner introducing the variation section.
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            ' NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # Entries for the predefined (built-in) functions come first.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asStatTable.append(' "BltIn/%s",' % (sFuncNm,));

        # Then the variation entries, in the same order as the enum/definitions;
        # the assert cross-checks the numbering assigned by the header generator.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write( '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write( '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write( '\n'
                   + '#endif /* IN_RING3 */\n');

        return True;
3299
3300 def generateNativeFunctionsHeader(self, oOut, _):
3301 """
3302 Generates the native recompiler functions header file.
3303 Returns success indicator.
3304 """
3305 if not self.oOptions.fNativeRecompilerEnabled:
3306 return True;
3307
3308 asLines = self.generateLicenseHeader();
3309
3310 # Prototype the function table.
3311 asLines += [
3312 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3313 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3314 '',
3315 ];
3316
3317 # Emit indicators as to which of the builtin functions have a native
3318 # recompiler function and which not. (We only really need this for
3319 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3320 for atBltIn in self.katBltIns:
3321 if atBltIn[1]:
3322 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3323 else:
3324 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3325
3326 # Emit prototypes for the builtin functions we use in tables.
3327 asLines += [
3328 '',
3329 '/* Prototypes for built-in functions used in the above tables. */',
3330 ];
3331 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3332 if fHaveRecompFunc:
3333 asLines += [
3334 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3335 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3336 ];
3337
3338 # Emit prototypes for table function.
3339 asLines += [
3340 '',
3341 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3342 ]
3343 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3344 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3345 asLines += [
3346 '',
3347 '/* Variation: ' + sVarName + ' */',
3348 ];
3349 for oThreadedFunction in self.aoThreadedFuncs:
3350 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3351 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3352 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3353 asLines += [
3354 '',
3355 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3356 ]
3357
3358 oOut.write('\n'.join(asLines));
3359 return True;
3360
    # This applies to both generateNativeFunctionsSource and generateNativeLivenessSource.
    # The output is split over this many files to parallelize compilation.
    kcNativeSourceParts = 6;

    def generateNativeFunctionsSource(self, oOut, idxPart):
        """
        Generates the native recompiler functions source file.

        idxPart selects which of the kcNativeSourceParts output files to
        generate; the function table goes into the last one.

        Returns success indicator.
        """
        assert(idxPart in range(self.kcNativeSourceParts));
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // self.kcNativeSourceParts;
        idxFirstVar = idxPart * cVariationsPerFile;
        idxEndVar = idxFirstVar + cVariationsPerFile;
        # The last part also gets the remainder of the integer division.
        if idxPart + 1 >= self.kcNativeSourceParts:
            idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 1);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table in the smallest file (currently the last).
        #
        if idxPart + 1 == self.kcNativeSourceParts:
            oOut.write( '\n'
                       + '\n'
                       + '/*\n'
                       + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                       + ' */\n'
                       + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                       + '{\n'
                       + ' /*Invalid*/ NULL,'
                       + '\n'
                       + ' /*\n'
                       + ' * Predefined.\n'
                       + ' */\n'
                       );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

            # Variation entries; NULL for the ones without a native recompiler.
            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write( ' /*\n'
                           + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                           + ' */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getNativeFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write( '};\n');

        oOut.write('\n');
        return True;
3473
3474 def generateNativeLivenessHeader(self, oOut, _):
3475 """
3476 Generates the internal native recompiler liveness header file.
3477 Returns success indicator.
3478 """
3479 if not self.oOptions.fNativeRecompilerEnabled:
3480 return True;
3481
3482 oOut.write('\n'.join(self.generateLicenseHeader()));
3483 oOut.write( '\n'
3484 + '/*\n'
3485 + ' * Liveness analysis function prototypes.\n'
3486 + ' */\n');
3487
3488 # Emit prototypes for the liveness table functions.
3489 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3490 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3491 oOut.write('/* Variation: ' + sVarName + ' */\n');
3492 for oThreadedFunction in self.aoThreadedFuncs:
3493 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3494 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3495 oOut.write('IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(' + oVariation.getLivenessFunctionName() + ');\n');
3496
3497 oOut.write('\n');
3498 return True;
3499
3500
    def generateNativeLivenessSource(self, oOut, idxPart):
        """
        Generates the native recompiler liveness analysis functions source file.

        idxPart selects which of the kcNativeSourceParts output files to
        generate; the function table goes into the last one.

        Returns success indicator.
        """
        assert(idxPart in range(self.kcNativeSourceParts));
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // self.kcNativeSourceParts;
        idxFirstVar = idxPart * cVariationsPerFile;
        idxEndVar = idxFirstVar + cVariationsPerFile;
        # The last part also gets the remainder of the integer division.
        if idxPart + 1 >= self.kcNativeSourceParts:
            idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 2);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table in the smallest file (currently the last).
        #
        if idxPart + 1 == self.kcNativeSourceParts:
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '/*\n'
                       + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                       + ' */\n'
                       + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                       + '{\n'
                       + ' /*Invalid*/ NULL,'
                       + '\n'
                       + ' /*\n'
                       + ' * Predefined.\n'
                       + ' */\n'
                       );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write(' iemNativeLivenessFunc_ThreadedCall, /*BltIn_%s*/\n' % (sFuncNm,))

            # Variation entries; the generic ThreadedCall analysis is used for
            # the ones without a native recompiler.
            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write( ' /*\n'
                           + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                           + ' */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getLivenessFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            oOut.write(' /*%4u*/ iemNativeLivenessFunc_ThreadedCall /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write( '};\n'
                       + '\n');
        return True;
3610
3611
3612 def getThreadedFunctionByIndex(self, idx):
3613 """
3614 Returns a ThreadedFunction object for the given index. If the index is
3615 out of bounds, a dummy is returned.
3616 """
3617 if idx < len(self.aoThreadedFuncs):
3618 return self.aoThreadedFuncs[idx];
3619 return ThreadedFunction.dummyInstance();
3620
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies the input sources thru while replacing each MC block with the
        code produced by ThreadedFunction.generateInputCode(). idxFile selects
        which subset of the input files to include (see
        g_aaoAllInstrFilesAndDefaultMapAndSet).

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude     = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    # File set index -1 means the file belongs to every set.
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the lines, substituting the MC blocks (kept in source order)
            # as their begin lines are reached.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine             = 0;
            while iLine < len(oParser.asLines):
                sLine  = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the block's end line and emit its tail.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3711
3712
3713 #
3714 # Main
3715 #
3716
3717 def main(self, asArgs):
3718 """
3719 C-like main function.
3720 Returns exit code.
3721 """
3722
3723 #
3724 # Parse arguments
3725 #
3726 sScriptDir = os.path.dirname(__file__);
3727 oParser = argparse.ArgumentParser(add_help = False);
3728 oParser.add_argument('asInFiles',
3729 metavar = 'input.cpp.h',
3730 nargs = '*',
3731 default = [os.path.join(sScriptDir, aoInfo[0])
3732 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3733 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3734 oParser.add_argument('--host-arch',
3735 metavar = 'arch',
3736 dest = 'sHostArch',
3737 action = 'store',
3738 default = None,
3739 help = 'The host architecture.');
3740
3741 oParser.add_argument('--out-thrd-funcs-hdr',
3742 metavar = 'file-thrd-funcs.h',
3743 dest = 'sOutFileThrdFuncsHdr',
3744 action = 'store',
3745 default = '-',
3746 help = 'The output header file for the threaded functions.');
3747 oParser.add_argument('--out-thrd-funcs-cpp',
3748 metavar = 'file-thrd-funcs.cpp',
3749 dest = 'sOutFileThrdFuncsCpp',
3750 action = 'store',
3751 default = '-',
3752 help = 'The output C++ file for the threaded functions.');
3753 oParser.add_argument('--out-n8ve-funcs-hdr',
3754 metavar = 'file-n8tv-funcs.h',
3755 dest = 'sOutFileN8veFuncsHdr',
3756 action = 'store',
3757 default = '-',
3758 help = 'The output header file for the native recompiler functions.');
3759 for iFile in range(1, self.kcNativeSourceParts + 1):
3760 oParser.add_argument('--out-n8ve-funcs-cpp%u' % (iFile,),
3761 metavar = 'file-n8tv-funcs%u.cpp' % (iFile,),
3762 dest = 'sOutFileN8veFuncsCpp%u' % (iFile,),
3763 action = 'store',
3764 default = '-',
3765 help = 'The output C++ file for the native recompiler functions part %u.' % (iFile,));
3766 oParser.add_argument('--out-n8ve-liveness-hdr',
3767 metavar = 'file-n8ve-liveness.h',
3768 dest = 'sOutFileN8veLivenessHdr',
3769 action = 'store',
3770 default = '-',
3771 help = 'The output header file for the native recompiler liveness analysis functions.');
3772 for iFile in range(1, self.kcNativeSourceParts + 1):
3773 oParser.add_argument('--out-n8ve-liveness-cpp%u' % (iFile,),
3774 metavar = 'file-n8ve-liveness%u.cpp' % (iFile,),
3775 dest = 'sOutFileN8veLivenessCpp%u' % (iFile,),
3776 action = 'store',
3777 default = '-',
3778 help = 'The output C++ file for the native recompiler liveness analysis functions part %u.'
3779 % (iFile,));
3780 oParser.add_argument('--native',
3781 dest = 'fNativeRecompilerEnabled',
3782 action = 'store_true',
3783 default = False,
3784 help = 'Enables generating the files related to native recompilation.');
3785 oParser.add_argument('--out-mod-input1',
3786 metavar = 'file-instr.cpp.h',
3787 dest = 'sOutFileModInput1',
3788 action = 'store',
3789 default = '-',
3790 help = 'The output C++/header file for modified input instruction files part 1.');
3791 oParser.add_argument('--out-mod-input2',
3792 metavar = 'file-instr.cpp.h',
3793 dest = 'sOutFileModInput2',
3794 action = 'store',
3795 default = '-',
3796 help = 'The output C++/header file for modified input instruction files part 2.');
3797 oParser.add_argument('--out-mod-input3',
3798 metavar = 'file-instr.cpp.h',
3799 dest = 'sOutFileModInput3',
3800 action = 'store',
3801 default = '-',
3802 help = 'The output C++/header file for modified input instruction files part 3.');
3803 oParser.add_argument('--out-mod-input4',
3804 metavar = 'file-instr.cpp.h',
3805 dest = 'sOutFileModInput4',
3806 action = 'store',
3807 default = '-',
3808 help = 'The output C++/header file for modified input instruction files part 4.');
3809 oParser.add_argument('--help', '-h', '-?',
3810 action = 'help',
3811 help = 'Display help and exit.');
3812 oParser.add_argument('--version', '-V',
3813 action = 'version',
3814 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3815 % (__version__.split()[1], iai.__version__.split()[1],),
3816 help = 'Displays the version/revision of the script and exit.');
3817 self.oOptions = oParser.parse_args(asArgs[1:]);
3818 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3819
3820 if self.oOptions.sHostArch not in ('amd64', 'arm64'):
3821 print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
3822 return 1;
3823
3824 #
3825 # Process the instructions specified in the IEM sources.
3826 #
3827 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3828 #
3829 # Generate the output files.
3830 #
3831 aaoOutputFiles = [
3832 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
3833 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
3834 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
3835 ( self.oOptions.sOutFileN8veLivenessHdr, self.generateNativeLivenessHeader, 0, ),
3836 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
3837 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
3838 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
3839 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
3840 ];
3841 for iFile in range(self.kcNativeSourceParts):
3842 aaoOutputFiles.extend([
3843 ( getattr(self.oOptions, 'sOutFileN8veFuncsCpp%u' % (iFile + 1)),
3844 self.generateNativeFunctionsSource, iFile, ),
3845 ( getattr(self.oOptions, 'sOutFileN8veLivenessCpp%u' % (iFile + 1)),
3846 self.generateNativeLivenessSource, iFile, ),
3847 ]);
3848 fRc = True;
3849 for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
3850 if sOutFile == '-':
3851 fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
3852 else:
3853 try:
3854 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3855 except Exception as oXcpt:
3856 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3857 return 1;
3858 fRc = fnGenMethod(oOut, iPartNo) and fRc;
3859 oOut.close();
3860 if fRc:
3861 return 0;
3862
3863 return 1;
3864
3865
# Script entry point: instantiate the generator, run it with the raw command
# line, and propagate its C-style exit code (0 = success) to the shell.
if __name__ == '__main__':
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3868
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette