VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py@ 105462

Last change on this file since 105462 was 105283, checked in by vboxsync, 4 months ago

VMM/IEM: Implement vcmpps, vcmppd, vcmpss, vcmpsd instruction emulations, bugref:9898

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 192.1 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 105283 2024-07-11 20:26:27Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 105283 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks: alias the removed py2 'long' builtin so shared code keeps working.
if sys.version_info[0] >= 3:
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;
55
## Basic parameter types: type name -> (cBits, fSigned, C-type).
g_kdTypeInfo = {
    'int8_t':    (  8, True,  'int8_t',    ),
    'int16_t':   ( 16, True,  'int16_t',   ),
    'int32_t':   ( 32, True,  'int32_t',   ),
    'int64_t':   ( 64, True,  'int64_t',   ),
    'uint4_t':   (  4, False, 'uint8_t',   ),
    'uint8_t':   (  8, False, 'uint8_t',   ),
    'uint16_t':  ( 16, False, 'uint16_t',  ),
    'uint32_t':  ( 32, False, 'uint32_t',  ),
    'uint64_t':  ( 64, False, 'uint64_t',  ),
    'uintptr_t': ( 64, False, 'uintptr_t', ), # ASSUMES 64-bit host pointer size.
    'bool':      (  1, False, 'bool',      ),
    'IEMMODE':   (  2, False, 'IEMMODE',   ),
};

## Extended type info - only for getTypeBitCount/variables.
g_kdTypeInfo2 = {
    'RTFLOAT32U':       (       32, False, 'RTFLOAT32U',       ),
    'RTFLOAT64U':       (       64, False, 'RTFLOAT64U',       ),
    'RTUINT64U':        (       64, False, 'RTUINT64U',        ),
    'RTGCPTR':          (       64, False, 'RTGCPTR',          ),
    'RTPBCD80U':        (       80, False, 'RTPBCD80U',        ),
    'RTFLOAT80U':       (       80, False, 'RTFLOAT80U',       ),
    'IEMFPURESULT':     (    80+16, False, 'IEMFPURESULT',     ),
    'IEMFPURESULTTWO':  ( 80+16+80, False, 'IEMFPURESULTTWO',  ),
    'RTUINT128U':       (      128, False, 'RTUINT128U',       ),
    'X86XMMREG':        (      128, False, 'X86XMMREG',        ),
    'X86YMMREG':        (      256, False, 'X86YMMREG',        ),
    'IEMMEDIAF2XMMSRC': (      256, False, 'IEMMEDIAF2XMMSRC', ),
    'IEMMEDIAF2YMMSRC': (      512, False, 'IEMMEDIAF2YMMSRC', ),
    'RTUINT256U':       (      256, False, 'RTUINT256U',       ),
    'IEMPCMPISTRXSRC':  (      256, False, 'IEMPCMPISTRXSRC',  ),
    'IEMPCMPESTRXSRC':  (      384, False, 'IEMPCMPESTRXSRC',  ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);

def getTypeBitCount(sType):
    """
    Translate a type to size in bits

    Anything containing a '*' or starting with 'P' is taken to be a pointer
    and reported as 64 bits (host pointer size assumption above).  Unknown
    types are reported on stdout and treated as 64-bit as well.
    """
    aoInfo = g_kdTypeInfo2.get(sType);
    if aoInfo is not None:
        return aoInfo[0];
    if '*' in sType or sType[0] == 'P':
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
104
## Maps pVCpu->iem.s field names to the type stored there; a None entry marks
## a field that threaded-function parameters may not reference.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte': ( None, ),
    'cbInstrBuf':       ( None, ),
    'pbInstrBuf':       ( None, ),
    'uInstrBufPc':      ( None, ),
    'cbInstrBufTotal':  ( None, ),
    'offCurInstrStart': ( None, ),
    'cbOpcode':         ( None, ),
    'offOpcode':        ( None, ),
    'offModRm':         ( None, ),
    # Okay ones.
    'fPrefixes':        ( 'uint32_t', ),
    'uRexReg':          ( 'uint8_t', ),
    'uRexB':            ( 'uint8_t', ),
    'uRexIndex':        ( 'uint8_t', ),
    'iEffSeg':          ( 'uint8_t', ),
    'enmEffOpSize':     ( 'IEMMODE', ),
    'enmDefAddrMode':   ( 'IEMMODE', ),
    'enmEffAddrMode':   ( 'IEMMODE', ),
    'enmDefOpSize':     ( 'IEMMODE', ),
    'idxPrefix':        ( 'uint8_t', ),
    'uVex3rdReg':       ( 'uint8_t', ),
    'uVexLength':       ( 'uint8_t', ),
    'fEvexStuff':       ( 'uint8_t', ),
    'uFpuOpcode':       ( 'uint16_t', ),
};
132
## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## @{
g_ksFinishAnnotation_Advance       = 'Advance';
g_ksFinishAnnotation_RelJmp        = 'RelJmp';
g_ksFinishAnnotation_SetJmp        = 'SetJmp';
g_ksFinishAnnotation_RelCall       = 'RelCall';
g_ksFinishAnnotation_IndCall       = 'IndCall';
g_ksFinishAnnotation_DeferToCImpl  = 'DeferToCImpl';
## @}
142
143
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name to deal with spaces in macro invocations and such.
        self.sStdRef     = sStdRef if sStdRef else ''.join(sOrgRef.split());
        ## Indicates that sOrgRef may not match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The type (typically derived).
        self.sType       = sType;
        ## The statement making the reference.
        self.oStmt       = oStmt;
        ## The parameter containing the references. None if implicit.
        self.iParam      = iParam;
        ## The offset in the parameter of the reference.
        self.offParam    = offParam;

        ## The variable name in the threaded function.
        self.sNewName    = 'x';
        ## The threaded function parameter this is packed into.
        self.iNewParam   = 99;
        ## The bit offset in iNewParam.
        self.offNewParam = 1024;
171
172
173class ThreadedFunctionVariation(object):
174 """ Threaded function variation. """
175
176 ## @name Variations.
177 ## These variations will match translation block selection/distinctions as well.
178 ## @{
179 # pylint: disable=line-too-long
180 ksVariation_Default = ''; ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
181 ksVariation_16 = '_16'; ##< 16-bit mode code (386+).
182 ksVariation_16f = '_16f'; ##< 16-bit mode code (386+), check+clear eflags.
183 ksVariation_16_Jmp = '_16_Jmp'; ##< 16-bit mode code (386+), conditional jump taken.
184 ksVariation_16f_Jmp = '_16f_Jmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
185 ksVariation_16_NoJmp = '_16_NoJmp'; ##< 16-bit mode code (386+), conditional jump not taken.
186 ksVariation_16f_NoJmp = '_16f_NoJmp'; ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
187 ksVariation_16_Addr32 = '_16_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
188 ksVariation_16f_Addr32 = '_16f_Addr32'; ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
189 ksVariation_16_Pre386 = '_16_Pre386'; ##< 16-bit mode code, pre-386 CPU target.
190 ksVariation_16f_Pre386 = '_16f_Pre386'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
191 ksVariation_16_Pre386_Jmp = '_16_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
192 ksVariation_16f_Pre386_Jmp = '_16f_Pre386_Jmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
193 ksVariation_16_Pre386_NoJmp = '_16_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
194 ksVariation_16f_Pre386_NoJmp = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
195 ksVariation_32 = '_32'; ##< 32-bit mode code (386+).
196 ksVariation_32f = '_32f'; ##< 32-bit mode code (386+), check+clear eflags.
197 ksVariation_32_Jmp = '_32_Jmp'; ##< 32-bit mode code (386+), conditional jump taken.
198 ksVariation_32f_Jmp = '_32f_Jmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
199 ksVariation_32_NoJmp = '_32_NoJmp'; ##< 32-bit mode code (386+), conditional jump not taken.
200 ksVariation_32f_NoJmp = '_32f_NoJmp'; ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
201 ksVariation_32_Flat = '_32_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide.
202 ksVariation_32f_Flat = '_32f_Flat'; ##< 32-bit mode code (386+) with CS, DS, E,S and SS flat and 4GB wide, eflags.
203 ksVariation_32_Addr16 = '_32_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
204 ksVariation_32f_Addr16 = '_32f_Addr16'; ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
205 ksVariation_64 = '_64'; ##< 64-bit mode code.
206 ksVariation_64f = '_64f'; ##< 64-bit mode code, check+clear eflags.
207 ksVariation_64_Jmp = '_64_Jmp'; ##< 64-bit mode code, conditional jump taken.
208 ksVariation_64f_Jmp = '_64f_Jmp'; ##< 64-bit mode code, check+clear eflags, conditional jump taken.
209 ksVariation_64_NoJmp = '_64_NoJmp'; ##< 64-bit mode code, conditional jump not taken.
210 ksVariation_64f_NoJmp = '_64f_NoJmp'; ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
211 ksVariation_64_FsGs = '_64_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS.
212 ksVariation_64f_FsGs = '_64f_FsGs'; ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
213 ksVariation_64_Addr32 = '_64_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing.
214 ksVariation_64f_Addr32 = '_64f_Addr32'; ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
215 # pylint: enable=line-too-long
216 kasVariations = (
217 ksVariation_Default,
218 ksVariation_16,
219 ksVariation_16f,
220 ksVariation_16_Jmp,
221 ksVariation_16f_Jmp,
222 ksVariation_16_NoJmp,
223 ksVariation_16f_NoJmp,
224 ksVariation_16_Addr32,
225 ksVariation_16f_Addr32,
226 ksVariation_16_Pre386,
227 ksVariation_16f_Pre386,
228 ksVariation_16_Pre386_Jmp,
229 ksVariation_16f_Pre386_Jmp,
230 ksVariation_16_Pre386_NoJmp,
231 ksVariation_16f_Pre386_NoJmp,
232 ksVariation_32,
233 ksVariation_32f,
234 ksVariation_32_Jmp,
235 ksVariation_32f_Jmp,
236 ksVariation_32_NoJmp,
237 ksVariation_32f_NoJmp,
238 ksVariation_32_Flat,
239 ksVariation_32f_Flat,
240 ksVariation_32_Addr16,
241 ksVariation_32f_Addr16,
242 ksVariation_64,
243 ksVariation_64f,
244 ksVariation_64_Jmp,
245 ksVariation_64f_Jmp,
246 ksVariation_64_NoJmp,
247 ksVariation_64f_NoJmp,
248 ksVariation_64_FsGs,
249 ksVariation_64f_FsGs,
250 ksVariation_64_Addr32,
251 ksVariation_64f_Addr32,
252 );
253 kasVariationsWithoutAddress = (
254 ksVariation_16,
255 ksVariation_16f,
256 ksVariation_16_Pre386,
257 ksVariation_16f_Pre386,
258 ksVariation_32,
259 ksVariation_32f,
260 ksVariation_64,
261 ksVariation_64f,
262 );
263 kasVariationsWithoutAddressNot286 = (
264 ksVariation_16,
265 ksVariation_16f,
266 ksVariation_32,
267 ksVariation_32f,
268 ksVariation_64,
269 ksVariation_64f,
270 );
271 kasVariationsWithoutAddressNot286Not64 = (
272 ksVariation_16,
273 ksVariation_16f,
274 ksVariation_32,
275 ksVariation_32f,
276 );
277 kasVariationsWithoutAddressNot64 = (
278 ksVariation_16,
279 ksVariation_16f,
280 ksVariation_16_Pre386,
281 ksVariation_16f_Pre386,
282 ksVariation_32,
283 ksVariation_32f,
284 );
285 kasVariationsWithoutAddressOnly64 = (
286 ksVariation_64,
287 ksVariation_64f,
288 );
289 kasVariationsWithAddress = (
290 ksVariation_16,
291 ksVariation_16f,
292 ksVariation_16_Addr32,
293 ksVariation_16f_Addr32,
294 ksVariation_16_Pre386,
295 ksVariation_16f_Pre386,
296 ksVariation_32,
297 ksVariation_32f,
298 ksVariation_32_Flat,
299 ksVariation_32f_Flat,
300 ksVariation_32_Addr16,
301 ksVariation_32f_Addr16,
302 ksVariation_64,
303 ksVariation_64f,
304 ksVariation_64_FsGs,
305 ksVariation_64f_FsGs,
306 ksVariation_64_Addr32,
307 ksVariation_64f_Addr32,
308 );
309 kasVariationsWithAddressNot286 = (
310 ksVariation_16,
311 ksVariation_16f,
312 ksVariation_16_Addr32,
313 ksVariation_16f_Addr32,
314 ksVariation_32,
315 ksVariation_32f,
316 ksVariation_32_Flat,
317 ksVariation_32f_Flat,
318 ksVariation_32_Addr16,
319 ksVariation_32f_Addr16,
320 ksVariation_64,
321 ksVariation_64f,
322 ksVariation_64_FsGs,
323 ksVariation_64f_FsGs,
324 ksVariation_64_Addr32,
325 ksVariation_64f_Addr32,
326 );
327 kasVariationsWithAddressNot286Not64 = (
328 ksVariation_16,
329 ksVariation_16f,
330 ksVariation_16_Addr32,
331 ksVariation_16f_Addr32,
332 ksVariation_32,
333 ksVariation_32f,
334 ksVariation_32_Flat,
335 ksVariation_32f_Flat,
336 ksVariation_32_Addr16,
337 ksVariation_32f_Addr16,
338 );
339 kasVariationsWithAddressNot64 = (
340 ksVariation_16,
341 ksVariation_16f,
342 ksVariation_16_Addr32,
343 ksVariation_16f_Addr32,
344 ksVariation_16_Pre386,
345 ksVariation_16f_Pre386,
346 ksVariation_32,
347 ksVariation_32f,
348 ksVariation_32_Flat,
349 ksVariation_32f_Flat,
350 ksVariation_32_Addr16,
351 ksVariation_32f_Addr16,
352 );
353 kasVariationsWithAddressOnly64 = (
354 ksVariation_64,
355 ksVariation_64f,
356 ksVariation_64_FsGs,
357 ksVariation_64f_FsGs,
358 ksVariation_64_Addr32,
359 ksVariation_64f_Addr32,
360 );
361 kasVariationsOnlyPre386 = (
362 ksVariation_16_Pre386,
363 ksVariation_16f_Pre386,
364 );
365 kasVariationsEmitOrder = (
366 ksVariation_Default,
367 ksVariation_64,
368 ksVariation_64f,
369 ksVariation_64_Jmp,
370 ksVariation_64f_Jmp,
371 ksVariation_64_NoJmp,
372 ksVariation_64f_NoJmp,
373 ksVariation_64_FsGs,
374 ksVariation_64f_FsGs,
375 ksVariation_32_Flat,
376 ksVariation_32f_Flat,
377 ksVariation_32,
378 ksVariation_32f,
379 ksVariation_32_Jmp,
380 ksVariation_32f_Jmp,
381 ksVariation_32_NoJmp,
382 ksVariation_32f_NoJmp,
383 ksVariation_16,
384 ksVariation_16f,
385 ksVariation_16_Jmp,
386 ksVariation_16f_Jmp,
387 ksVariation_16_NoJmp,
388 ksVariation_16f_NoJmp,
389 ksVariation_16_Addr32,
390 ksVariation_16f_Addr32,
391 ksVariation_16_Pre386,
392 ksVariation_16f_Pre386,
393 ksVariation_16_Pre386_Jmp,
394 ksVariation_16f_Pre386_Jmp,
395 ksVariation_16_Pre386_NoJmp,
396 ksVariation_16f_Pre386_NoJmp,
397 ksVariation_32_Addr16,
398 ksVariation_32f_Addr16,
399 ksVariation_64_Addr32,
400 ksVariation_64f_Addr32,
401 );
402 kdVariationNames = {
403 ksVariation_Default: 'defer-to-cimpl',
404 ksVariation_16: '16-bit',
405 ksVariation_16f: '16-bit w/ eflag checking and clearing',
406 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
407 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
408 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
409 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
410 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
411 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
412 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
413 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
414 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
415 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
416 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
417 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
418 ksVariation_32: '32-bit',
419 ksVariation_32f: '32-bit w/ eflag checking and clearing',
420 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
421 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
422 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
423 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
424 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
425 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
426 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
427 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
428 ksVariation_64: '64-bit',
429 ksVariation_64f: '64-bit w/ eflag checking and clearing',
430 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
431 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
432 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
433 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
434 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
435 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
436 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
437 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
438 };
439 kdVariationsWithEflagsCheckingAndClearing = {
440 ksVariation_16f: True,
441 ksVariation_16f_Jmp: True,
442 ksVariation_16f_NoJmp: True,
443 ksVariation_16f_Addr32: True,
444 ksVariation_16f_Pre386: True,
445 ksVariation_16f_Pre386_Jmp: True,
446 ksVariation_16f_Pre386_NoJmp: True,
447 ksVariation_32f: True,
448 ksVariation_32f_Jmp: True,
449 ksVariation_32f_NoJmp: True,
450 ksVariation_32f_Flat: True,
451 ksVariation_32f_Addr16: True,
452 ksVariation_64f: True,
453 ksVariation_64f_Jmp: True,
454 ksVariation_64f_NoJmp: True,
455 ksVariation_64f_FsGs: True,
456 ksVariation_64f_Addr32: True,
457 };
458 kdVariationsOnly64NoFlags = {
459 ksVariation_64: True,
460 ksVariation_64_Jmp: True,
461 ksVariation_64_NoJmp: True,
462 ksVariation_64_FsGs: True,
463 ksVariation_64_Addr32: True,
464 };
465 kdVariationsOnly64WithFlags = {
466 ksVariation_64f: True,
467 ksVariation_64f_Jmp: True,
468 ksVariation_64f_NoJmp: True,
469 ksVariation_64f_FsGs: True,
470 ksVariation_64f_Addr32: True,
471 };
472 kdVariationsOnlyPre386NoFlags = {
473 ksVariation_16_Pre386: True,
474 ksVariation_16_Pre386_Jmp: True,
475 ksVariation_16_Pre386_NoJmp: True,
476 };
477 kdVariationsOnlyPre386WithFlags = {
478 ksVariation_16f_Pre386: True,
479 ksVariation_16f_Pre386_Jmp: True,
480 ksVariation_16f_Pre386_NoJmp: True,
481 };
482 kdVariationsWithFlatAddress = {
483 ksVariation_32_Flat: True,
484 ksVariation_32f_Flat: True,
485 ksVariation_64: True,
486 ksVariation_64f: True,
487 ksVariation_64_Addr32: True,
488 ksVariation_64f_Addr32: True,
489 };
490 kdVariationsWithFlatStackAddress = {
491 ksVariation_32_Flat: True,
492 ksVariation_32f_Flat: True,
493 ksVariation_64: True,
494 ksVariation_64f: True,
495 ksVariation_64_FsGs: True,
496 ksVariation_64f_FsGs: True,
497 ksVariation_64_Addr32: True,
498 ksVariation_64f_Addr32: True,
499 };
500 kdVariationsWithFlat64StackAddress = {
501 ksVariation_64: True,
502 ksVariation_64f: True,
503 ksVariation_64_FsGs: True,
504 ksVariation_64f_FsGs: True,
505 ksVariation_64_Addr32: True,
506 ksVariation_64f_Addr32: True,
507 };
508 kdVariationsWithFlatAddr16 = {
509 ksVariation_16: True,
510 ksVariation_16f: True,
511 ksVariation_16_Pre386: True,
512 ksVariation_16f_Pre386: True,
513 ksVariation_32_Addr16: True,
514 ksVariation_32f_Addr16: True,
515 };
516 kdVariationsWithFlatAddr32No64 = {
517 ksVariation_16_Addr32: True,
518 ksVariation_16f_Addr32: True,
519 ksVariation_32: True,
520 ksVariation_32f: True,
521 ksVariation_32_Flat: True,
522 ksVariation_32f_Flat: True,
523 };
524 kdVariationsWithAddressOnly64 = {
525 ksVariation_64: True,
526 ksVariation_64f: True,
527 ksVariation_64_FsGs: True,
528 ksVariation_64f_FsGs: True,
529 ksVariation_64_Addr32: True,
530 ksVariation_64f_Addr32: True,
531 };
532 kdVariationsWithConditional = {
533 ksVariation_16_Jmp: True,
534 ksVariation_16_NoJmp: True,
535 ksVariation_16_Pre386_Jmp: True,
536 ksVariation_16_Pre386_NoJmp: True,
537 ksVariation_32_Jmp: True,
538 ksVariation_32_NoJmp: True,
539 ksVariation_64_Jmp: True,
540 ksVariation_64_NoJmp: True,
541 ksVariation_16f_Jmp: True,
542 ksVariation_16f_NoJmp: True,
543 ksVariation_16f_Pre386_Jmp: True,
544 ksVariation_16f_Pre386_NoJmp: True,
545 ksVariation_32f_Jmp: True,
546 ksVariation_32f_NoJmp: True,
547 ksVariation_64f_Jmp: True,
548 ksVariation_64f_NoJmp: True,
549 };
550 kdVariationsWithConditionalNoJmp = {
551 ksVariation_16_NoJmp: True,
552 ksVariation_16_Pre386_NoJmp: True,
553 ksVariation_32_NoJmp: True,
554 ksVariation_64_NoJmp: True,
555 ksVariation_16f_NoJmp: True,
556 ksVariation_16f_Pre386_NoJmp: True,
557 ksVariation_32f_NoJmp: True,
558 ksVariation_64f_NoJmp: True,
559 };
560 kdVariationsOnlyPre386 = {
561 ksVariation_16_Pre386: True,
562 ksVariation_16f_Pre386: True,
563 ksVariation_16_Pre386_Jmp: True,
564 ksVariation_16f_Pre386_Jmp: True,
565 ksVariation_16_Pre386_NoJmp: True,
566 ksVariation_16f_Pre386_NoJmp: True,
567 };
568 ## @}
569
570 ## IEM_CIMPL_F_XXX flags that we know.
571 ## The value indicates whether it terminates the TB or not. The goal is to
572 ## improve the recompiler so all but END_TB will be False.
573 ##
574 ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
575 kdCImplFlags = {
576 'IEM_CIMPL_F_MODE': False,
577 'IEM_CIMPL_F_BRANCH_DIRECT': False,
578 'IEM_CIMPL_F_BRANCH_INDIRECT': False,
579 'IEM_CIMPL_F_BRANCH_RELATIVE': False,
580 'IEM_CIMPL_F_BRANCH_FAR': True,
581 'IEM_CIMPL_F_BRANCH_CONDITIONAL': False,
582 # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
583 'IEM_CIMPL_F_BRANCH_STACK': False,
584 'IEM_CIMPL_F_BRANCH_STACK_FAR': False,
585 'IEM_CIMPL_F_RFLAGS': False,
586 'IEM_CIMPL_F_INHIBIT_SHADOW': False,
587 'IEM_CIMPL_F_CHECK_IRQ_AFTER': False,
588 'IEM_CIMPL_F_CHECK_IRQ_BEFORE': False,
589 'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER': False, # (ignore)
590 'IEM_CIMPL_F_STATUS_FLAGS': False,
591 'IEM_CIMPL_F_VMEXIT': False,
592 'IEM_CIMPL_F_FPU': False,
593 'IEM_CIMPL_F_REP': False,
594 'IEM_CIMPL_F_IO': False,
595 'IEM_CIMPL_F_END_TB': True,
596 'IEM_CIMPL_F_XCPT': True,
597 'IEM_CIMPL_F_CALLS_CIMPL': False,
598 'IEM_CIMPL_F_CALLS_AIMPL': False,
599 'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE': False,
600 'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE': False,
601 };
602
603 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
604 self.oParent = oThreadedFunction # type: ThreadedFunction
605 ##< ksVariation_Xxxx.
606 self.sVariation = sVariation
607
608 ## Threaded function parameter references.
609 self.aoParamRefs = [] # type: List[ThreadedParamRef]
610 ## Unique parameter references.
611 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
612 ## Minimum number of parameters to the threaded function.
613 self.cMinParams = 0;
614
615 ## List/tree of statements for the threaded function.
616 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
617
618 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
619 self.iEnumValue = -1;
620
621 ## Native recompilation details for this variation.
622 self.oNativeRecomp = None;
623
624 def getIndexName(self):
625 sName = self.oParent.oMcBlock.sFunction;
626 if sName.startswith('iemOp_'):
627 sName = sName[len('iemOp_'):];
628 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
629
630 def getThreadedFunctionName(self):
631 sName = self.oParent.oMcBlock.sFunction;
632 if sName.startswith('iemOp_'):
633 sName = sName[len('iemOp_'):];
634 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
635
636 def getNativeFunctionName(self):
637 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
638
639 def getLivenessFunctionName(self):
640 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
641
642 def getShortName(self):
643 sName = self.oParent.oMcBlock.sFunction;
644 if sName.startswith('iemOp_'):
645 sName = sName[len('iemOp_'):];
646 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
647
648 def getThreadedFunctionStatisticsName(self):
649 sName = self.oParent.oMcBlock.sFunction;
650 if sName.startswith('iemOp_'):
651 sName = sName[len('iemOp_'):];
652
653 sVarNm = self.sVariation;
654 if sVarNm:
655 if sVarNm.startswith('_'):
656 sVarNm = sVarNm[1:];
657 if sVarNm.endswith('_Jmp'):
658 sVarNm = sVarNm[:-4];
659 sName += '_Jmp';
660 elif sVarNm.endswith('_NoJmp'):
661 sVarNm = sVarNm[:-6];
662 sName += '_NoJmp';
663 else:
664 sVarNm = 'DeferToCImpl';
665
666 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
667
668 def isWithFlagsCheckingAndClearingVariation(self):
669 """
670 Checks if this is a variation that checks and clears EFLAGS.
671 """
672 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
673
674 #
675 # Analysis and code morphing.
676 #
677
678 def raiseProblem(self, sMessage):
679 """ Raises a problem. """
680 self.oParent.raiseProblem(sMessage);
681
682 def warning(self, sMessage):
683 """ Emits a warning. """
684 self.oParent.warning(sMessage);
685
686 def analyzeReferenceToType(self, sRef):
687 """
688 Translates a variable or structure reference to a type.
689 Returns type name.
690 Raises exception if unable to figure it out.
691 """
692 ch0 = sRef[0];
693 if ch0 == 'u':
694 if sRef.startswith('u32'):
695 return 'uint32_t';
696 if sRef.startswith('u8') or sRef == 'uReg':
697 return 'uint8_t';
698 if sRef.startswith('u64'):
699 return 'uint64_t';
700 if sRef.startswith('u16'):
701 return 'uint16_t';
702 elif ch0 == 'b':
703 return 'uint8_t';
704 elif ch0 == 'f':
705 return 'bool';
706 elif ch0 == 'i':
707 if sRef.startswith('i8'):
708 return 'int8_t';
709 if sRef.startswith('i16'):
710 return 'int16_t';
711 if sRef.startswith('i32'):
712 return 'int32_t';
713 if sRef.startswith('i64'):
714 return 'int64_t';
715 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
716 return 'uint8_t';
717 elif ch0 == 'p':
718 if sRef.find('-') < 0:
719 return 'uintptr_t';
720 if sRef.startswith('pVCpu->iem.s.'):
721 sField = sRef[len('pVCpu->iem.s.') : ];
722 if sField in g_kdIemFieldToType:
723 if g_kdIemFieldToType[sField][0]:
724 return g_kdIemFieldToType[sField][0];
725 elif ch0 == 'G' and sRef.startswith('GCPtr'):
726 return 'uint64_t';
727 elif ch0 == 'e':
728 if sRef == 'enmEffOpSize':
729 return 'IEMMODE';
730 elif ch0 == 'o':
731 if sRef.startswith('off32'):
732 return 'uint32_t';
733 elif sRef == 'cbFrame': # enter
734 return 'uint16_t';
735 elif sRef == 'cShift': ## @todo risky
736 return 'uint8_t';
737
738 self.raiseProblem('Unknown reference: %s' % (sRef,));
739 return None; # Shut up pylint 2.16.2.
740
741 def analyzeCallToType(self, sFnRef):
742 """
743 Determins the type of an indirect function call.
744 """
745 assert sFnRef[0] == 'p';
746
747 #
748 # Simple?
749 #
750 if sFnRef.find('-') < 0:
751 oDecoderFunction = self.oParent.oMcBlock.oFunction;
752
753 # Try the argument list of the function defintion macro invocation first.
754 iArg = 2;
755 while iArg < len(oDecoderFunction.asDefArgs):
756 if sFnRef == oDecoderFunction.asDefArgs[iArg]:
757 return oDecoderFunction.asDefArgs[iArg - 1];
758 iArg += 1;
759
760 # Then check out line that includes the word and looks like a variable declaration.
761 oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
762 for sLine in oDecoderFunction.asLines:
763 oMatch = oRe.match(sLine);
764 if oMatch:
765 if not oMatch.group(1).startswith('const'):
766 return oMatch.group(1);
767 return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
768
769 #
770 # Deal with the pImpl->pfnXxx:
771 #
772 elif sFnRef.startswith('pImpl->pfn'):
773 sMember = sFnRef[len('pImpl->') : ];
774 sBaseType = self.analyzeCallToType('pImpl');
775 offBits = sMember.rfind('U') + 1;
776 if sBaseType == 'PCIEMOPBINSIZES': return 'PFNIEMAIMPLBINU' + sMember[offBits:];
777 if sBaseType == 'PCIEMOPBINTODOSIZES': return 'PFNIEMAIMPLBINTODOU' + sMember[offBits:];
778 if sBaseType == 'PCIEMOPUNARYSIZES': return 'PFNIEMAIMPLUNARYU' + sMember[offBits:];
779 if sBaseType == 'PCIEMOPSHIFTSIZES': return 'PFNIEMAIMPLSHIFTU' + sMember[offBits:];
780 if sBaseType == 'PCIEMOPSHIFTDBLSIZES': return 'PFNIEMAIMPLSHIFTDBLU' + sMember[offBits:];
781 if sBaseType == 'PCIEMOPMULDIVSIZES': return 'PFNIEMAIMPLMULDIVU' + sMember[offBits:];
782 if sBaseType == 'PCIEMOPMEDIAF2': return 'PFNIEMAIMPLMEDIAF2U' + sMember[offBits:];
783 if sBaseType == 'PCIEMOPMEDIAF2IMM8': return 'PFNIEMAIMPLMEDIAF2U' + sMember[offBits:] + 'IMM8';
784 if sBaseType == 'PCIEMOPMEDIAF3': return 'PFNIEMAIMPLMEDIAF3U' + sMember[offBits:];
785 if sBaseType == 'PCIEMOPMEDIAOPTF2': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
786 if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8': return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
787 if sBaseType == 'PCIEMOPMEDIAOPTF3': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
788 if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8': return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
789 if sBaseType == 'PCIEMOPBLENDOP': return 'PFNIEMAIMPLAVXBLENDU' + sMember[offBits:];
790
791 self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));
792
793 self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
794 return None; # Shut up pylint 2.16.2.
795
796 def analyze8BitGRegStmt(self, oStmt):
797 """
798 Gets the 8-bit general purpose register access details of the given statement.
799 ASSUMES the statement is one accessing an 8-bit GREG.
800 """
801 idxReg = 0;
802 if ( oStmt.sName.find('_FETCH_') > 0
803 or oStmt.sName.find('_REF_') > 0
804 or oStmt.sName.find('_TO_LOCAL') > 0):
805 idxReg = 1;
806
807 sRegRef = oStmt.asParams[idxReg];
808 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
809 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
810 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
811 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
812 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
813 else:
814 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
815 % (sRegRef, sRegRef, sRegRef,);
816
817 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
818 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
819 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
820 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
821 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
822 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
823 else:
824 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
825 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
826 sStdRef = 'bOther8Ex';
827
828 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
829 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
830 return (idxReg, sOrgExpr, sStdRef);
831
832
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation threatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each entry maps an MC name to a two-element tuple:
    ##   [0] the index of the effective segment (iEffSeg) parameter that the
    ##       FLAT form drops, or -1 if there is none (see the idxEffSeg
    ##       handling in analyzeMorphStmtForThreaded);
    ##   [1] the name of the corresponding FLAT MC.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':              ( 1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':              ( 1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':          ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':           ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':              ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':    ( 1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':               ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':     ( 1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':         ( 1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':        ( 1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':             ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':     ( 2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':           ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':           ( 3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX_AND_YREG_YMM':     ( 2, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX_AND_YREG_YMM' ),
        'IEM_MC_STORE_MEM_U8':                ( 0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':               ( 0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':          ( 0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':         ( 0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC':        ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':              ( 0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC':        ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':    ( 0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC':           ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':               ( 2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC':          ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':              ( 2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC':         ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':             ( 2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                  ( 3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
935
    ## Maps stack push/pop MCs to their flat variants.
    ## Each entry: (MC for flat 32-bit stack variations, MC for flat 64-bit
    ## stack variations) - indexed in analyzeMorphStmtForThreaded by whether
    ## the variation is in kdVariationsWithFlat64StackAddress.  Entries that
    ## only exist in one width keep the original MC name in the other slot.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                  ( 'IEM_MC_FLAT32_PUSH_U16',      'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                  ( 'IEM_MC_FLAT32_PUSH_U32',      'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                  ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':             ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':              ( 'IEM_MC_FLAT32_POP_GREG_U16',  'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':              ( 'IEM_MC_FLAT32_POP_GREG_U32',  'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':              ( 'IEM_MC_POP_GREG_U64',         'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
945
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_XX MC to
    ## substitute for IEM_MC_CALC_RM_EFF_ADDR in analyzeMorphStmtForThreaded,
    ## keyed by the effective address size of the variation.
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
966
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        dState carries state across recursive calls (currently just the
        'IEM_MC_ASSERT_EFLAGS' marker set when IEM_MC_REF_EFLAGS is seen),
        iParamRef is the current index into self.aoParamRefs, and iLevel is
        the conditional nesting depth (incremented for each branch recursion).
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that is purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier offsets in the same parameter string stay valid.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            # Splice the new (threaded parameter) name in place of the original reference.
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (   oNewStmt.sName
                      in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                          'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                          'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH',
                          'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH', 'IEM_MC_REL_CALL_S64_AND_FINISH',
                          'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH', 'IEM_MC_IND_CALL_U64_AND_FINISH',
                          'IEM_MC_RETN_AND_FINISH',)):
                    # SET_RIP forms take an absolute target, so no cbInstr parameter for them.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    # Pick the PCxx[_WITH_FLAGS] suffix matching the variation.
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;        # cbInstr is prepended, shifting the function and param indexes.
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        # The FLAT forms have no segment parameter; it must be iEffSeg (or ES) to be safely dropped.
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
                                        'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None. Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an MC before the ..._FINISH
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

                # Process branches of conditionals recursively.
                if isinstance(oStmt, iai.McStmtCond):
                    (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                       iParamRef, iLevel + 1);
                    if oStmt.aoElseBranch:
                        (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                             dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1144
1145
1146 def analyzeConsolidateThreadedParamRefs(self):
1147 """
1148 Consolidate threaded function parameter references into a dictionary
1149 with lists of the references to each variable/field.
1150 """
1151 # Gather unique parameters.
1152 self.dParamRefs = {};
1153 for oRef in self.aoParamRefs:
1154 if oRef.sStdRef not in self.dParamRefs:
1155 self.dParamRefs[oRef.sStdRef] = [oRef,];
1156 else:
1157 self.dParamRefs[oRef.sStdRef].append(oRef);
1158
1159 # Generate names for them for use in the threaded function.
1160 dParamNames = {};
1161 for sName, aoRefs in self.dParamRefs.items():
1162 # Morph the reference expression into a name.
1163 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1164 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1165 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1166 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1167 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1168 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1169 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1170 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1171 else:
1172 sName += 'P';
1173
1174 # Ensure it's unique.
1175 if sName in dParamNames:
1176 for i in range(10):
1177 if sName + str(i) not in dParamNames:
1178 sName += str(i);
1179 break;
1180 dParamNames[sName] = True;
1181
1182 # Update all the references.
1183 for oRef in aoRefs:
1184 oRef.sNewName = sName;
1185
1186 # Organize them by size too for the purpose of optimize them.
1187 dBySize = {} # type: Dict[str, str]
1188 for sStdRef, aoRefs in self.dParamRefs.items():
1189 if aoRefs[0].sType[0] != 'P':
1190 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1191 assert(cBits <= 64);
1192 else:
1193 cBits = 64;
1194
1195 if cBits not in dBySize:
1196 dBySize[cBits] = [sStdRef,]
1197 else:
1198 dBySize[cBits].append(sStdRef);
1199
1200 # Pack the parameters as best as we can, starting with the largest ones
1201 # and ASSUMING a 64-bit parameter size.
1202 self.cMinParams = 0;
1203 offNewParam = 0;
1204 for cBits in sorted(dBySize.keys(), reverse = True):
1205 for sStdRef in dBySize[cBits]:
1206 if offNewParam == 0 or offNewParam + cBits > 64:
1207 self.cMinParams += 1;
1208 offNewParam = cBits;
1209 else:
1210 offNewParam += cBits;
1211 assert(offNewParam <= 64);
1212
1213 for oRef in self.dParamRefs[sStdRef]:
1214 oRef.iNewParam = self.cMinParams - 1;
1215 oRef.offNewParam = offNewParam - cBits;
1216
1217 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1218 if self.cMinParams >= 4:
1219 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1220 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1221
1222 return True;
1223
1224 ksHexDigits = '0123456789abcdefABCDEF';
1225
1226 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1227 """
1228 Scans the statements for things that have to passed on to the threaded
1229 function (populates self.aoParamRefs).
1230 """
1231 for oStmt in aoStmts:
1232 # Some statements we can skip alltogether.
1233 if isinstance(oStmt, iai.McCppPreProc):
1234 continue;
1235 if oStmt.isCppStmt() and oStmt.fDecode:
1236 continue;
1237 if oStmt.sName in ('IEM_MC_BEGIN',):
1238 continue;
1239
1240 if isinstance(oStmt, iai.McStmtVar):
1241 if oStmt.sValue is None:
1242 continue;
1243 aiSkipParams = { 0: True, 1: True, 3: True };
1244 else:
1245 aiSkipParams = {};
1246
1247 # Several statements have implicit parameters and some have different parameters.
1248 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1249 'IEM_MC_REL_JMP_S32_AND_FINISH',
1250 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
1251 'IEM_MC_REL_CALL_S64_AND_FINISH',
1252 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
1253 'IEM_MC_IND_CALL_U64_AND_FINISH',
1254 'IEM_MC_RETN_AND_FINISH',
1255 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3',
1256 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1257 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1258 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1259 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1260
1261 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1262 and self.sVariation not in self.kdVariationsOnlyPre386):
1263 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1264
1265 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1266 # This is being pretty presumptive about bRm always being the RM byte...
1267 assert len(oStmt.asParams) == 3;
1268 assert oStmt.asParams[1] == 'bRm';
1269
1270 if self.sVariation in self.kdVariationsWithFlatAddr16:
1271 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1272 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1273 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1274 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1275 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1276 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1277 'uint8_t', oStmt, sStdRef = 'bSib'));
1278 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1279 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1280 else:
1281 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1282 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1283 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1284 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1285 'uint8_t', oStmt, sStdRef = 'bSib'));
1286 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1287 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1288 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1289 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1290 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1291
1292 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1293 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1294 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1295 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1296 aiSkipParams[idxReg] = True; # Skip the parameter below.
1297
1298 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1299 if ( self.sVariation in self.kdVariationsWithFlatAddress
1300 and oStmt.sName in self.kdMemMcToFlatInfo
1301 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1302 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1303
1304 # Inspect the target of calls to see if we need to pass down a
1305 # function pointer or function table pointer for it to work.
1306 if isinstance(oStmt, iai.McStmtCall):
1307 if oStmt.sFn[0] == 'p':
1308 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1309 elif ( oStmt.sFn[0] != 'i'
1310 and not oStmt.sFn.startswith('RT_CONCAT3')
1311 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1312 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1313 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1314 aiSkipParams[oStmt.idxFn] = True;
1315
1316 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1317 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1318 assert oStmt.idxFn == 2;
1319 aiSkipParams[0] = True;
1320
1321 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1322 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1323 aiSkipParams[0] = True;
1324
1325
1326 # Check all the parameters for bogus references.
1327 for iParam, sParam in enumerate(oStmt.asParams):
1328 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1329 # The parameter may contain a C expression, so we have to try
1330 # extract the relevant bits, i.e. variables and fields while
1331 # ignoring operators and parentheses.
1332 offParam = 0;
1333 while offParam < len(sParam):
1334 # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
1335 ch = sParam[offParam];
1336 if ch.isalpha() or ch == '_':
1337 offStart = offParam;
1338 offParam += 1;
1339 while offParam < len(sParam):
1340 ch = sParam[offParam];
1341 if not ch.isalnum() and ch != '_' and ch != '.':
1342 if ch != '-' or sParam[offParam + 1] != '>':
1343 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1344 if ( ch == '('
1345 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1346 offParam += len('(pVM)->') - 1;
1347 else:
1348 break;
1349 offParam += 1;
1350 offParam += 1;
1351 sRef = sParam[offStart : offParam];
1352
1353 # For register references, we pass the full register indexes instead as macros
1354 # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
1355 # threaded function will be more efficient if we just pass the register index
1356 # as a 4-bit param.
1357 if ( sRef.startswith('IEM_GET_MODRM')
1358 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1359 or sRef.startswith('IEM_GET_IMM8_REG') ):
1360 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1361 if sParam[offParam] != '(':
1362 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1363 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1364 if asMacroParams is None:
1365 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1366 offParam = offCloseParam + 1;
1367 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1368 oStmt, iParam, offStart));
1369
1370 # We can skip known variables.
1371 elif sRef in self.oParent.dVariables:
1372 pass;
1373
1374 # Skip certain macro invocations.
1375 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1376 'IEM_GET_GUEST_CPU_FEATURES',
1377 'IEM_IS_GUEST_CPU_AMD',
1378 'IEM_IS_16BIT_CODE',
1379 'IEM_IS_32BIT_CODE',
1380 'IEM_IS_64BIT_CODE',
1381 ):
1382 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1383 if sParam[offParam] != '(':
1384 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1385 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1386 if asMacroParams is None:
1387 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1388 offParam = offCloseParam + 1;
1389
1390 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1391 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1392 'IEM_IS_16BIT_CODE',
1393 'IEM_IS_32BIT_CODE',
1394 'IEM_IS_64BIT_CODE',
1395 ):
1396 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1397 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1398 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1399 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1400 offParam += 1;
1401
1402 # Skip constants, globals, types (casts), sizeof and macros.
1403 elif ( sRef.startswith('IEM_OP_PRF_')
1404 or sRef.startswith('IEM_ACCESS_')
1405 or sRef.startswith('IEMINT_')
1406 or sRef.startswith('X86_GREG_')
1407 or sRef.startswith('X86_SREG_')
1408 or sRef.startswith('X86_EFL_')
1409 or sRef.startswith('X86_FSW_')
1410 or sRef.startswith('X86_FCW_')
1411 or sRef.startswith('X86_XCPT_')
1412 or sRef.startswith('IEMMODE_')
1413 or sRef.startswith('IEM_F_')
1414 or sRef.startswith('IEM_CIMPL_F_')
1415 or sRef.startswith('g_')
1416 or sRef.startswith('iemAImpl_')
1417 or sRef.startswith('kIemNativeGstReg_')
1418 or sRef.startswith('RT_ARCH_VAL_')
1419 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1420 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1421 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1422 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1423 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1424 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1425 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1426 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1427 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1428 'NIL_RTGCPTR',) ):
1429 pass;
1430
1431 # Skip certain macro invocations.
1432 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1433 elif ( ( '.' not in sRef
1434 and '-' not in sRef
1435 and sRef not in ('pVCpu', ) )
1436 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1437 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1438 oStmt, iParam, offStart));
1439 # Number.
1440 elif ch.isdigit():
1441 if ( ch == '0'
1442 and offParam + 2 <= len(sParam)
1443 and sParam[offParam + 1] in 'xX'
1444 and sParam[offParam + 2] in self.ksHexDigits ):
1445 offParam += 2;
1446 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1447 offParam += 1;
1448 else:
1449 while offParam < len(sParam) and sParam[offParam].isdigit():
1450 offParam += 1;
1451 # Comment?
1452 elif ( ch == '/'
1453 and offParam + 4 <= len(sParam)
1454 and sParam[offParam + 1] == '*'):
1455 offParam += 2;
1456 offNext = sParam.find('*/', offParam);
1457 if offNext < offParam:
1458 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1459 offParam = offNext + 2;
1460 # Whatever else.
1461 else:
1462 offParam += 1;
1463
1464 # Traverse the branches of conditionals.
1465 if isinstance(oStmt, iai.McStmtCond):
1466 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1467 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1468 return True;
1469
1470 def analyzeVariation(self, aoStmts):
1471 """
1472 2nd part of the analysis, done on each variation.
1473
1474 The variations may differ in parameter requirements and will end up with
1475 slightly different MC sequences. Thus this is done on each individually.
1476
1477 Returns dummy True - raises exception on trouble.
1478 """
1479 # Now scan the code for variables and field references that needs to
1480 # be passed to the threaded function because they are related to the
1481 # instruction decoding.
1482 self.analyzeFindThreadedParamRefs(aoStmts);
1483 self.analyzeConsolidateThreadedParamRefs();
1484
1485 # Morph the statement stream for the block into what we'll be using in the threaded function.
1486 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1487 if iParamRef != len(self.aoParamRefs):
1488 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1489
1490 return True;
1491
    def emitThreadedCallStmtsForVariant(self, cchIndent, fTbLookupTable = False, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call. This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of iai.McCpp* statement objects.
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Each generic 64-bit parameter is assembled by OR'ing together the
            # source references packed into it (iNewParam/offNewParam were
            # assigned by analyzeConsolidateThreadedParamRefs).
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these doesn't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        # Emit the call macro.  The WITH_TB_LOOKUP variant takes an extra
        # leading argument selecting a small ('0') or large ('1') lookup table.
        if fTbLookupTable is False:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,),
                                         asCallArgs, cchIndent = cchIndent));
        else:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_%s' % (len(asCallArgs) - 1,),
                                         ['0' if fTbLookupTable is True else '1',] + asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        # Unconditional branches (and conditional ones in variations that can jump)
        # mark the TB as branched; IEM_CIMPL_F_BRANCH_XXX is mapped to IEMBRANCHED_F_XXX.
        if (    asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1575
1576
1577class ThreadedFunction(object):
1578 """
1579 A threaded function.
1580 """
1581
1582 def __init__(self, oMcBlock: iai.McBlock) -> None:
1583 self.oMcBlock = oMcBlock # type: iai.McBlock
1584 # The remaining fields are only useful after analyze() has been called:
1585 ## Variations for this block. There is at least one.
1586 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1587 ## Variation dictionary containing the same as aoVariations.
1588 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1589 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1590 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1591 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1592 ## and those determined by analyzeCodeOperation().
1593 self.dsCImplFlags = {} # type: Dict[str, bool]
1594 ## The unique sub-name for this threaded function.
1595 self.sSubName = '';
1596 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1597 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1598
1599 @staticmethod
1600 def dummyInstance():
1601 """ Gets a dummy instance. """
1602 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1603 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1604
1605 def hasWithFlagsCheckingAndClearingVariation(self):
1606 """
1607 Check if there is one or more with flags checking and clearing
1608 variations for this threaded function.
1609 """
1610 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1611 if sVarWithFlags in self.dVariations:
1612 return True;
1613 return False;
1614
1615 #
1616 # Analysis and code morphing.
1617 #
1618
1619 def raiseProblem(self, sMessage):
1620 """ Raises a problem. """
1621 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1622
1623 def error(self, sMessage, oGenerator):
1624 """ Emits an error via the generator object, causing it to fail. """
1625 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1626
1627 def warning(self, sMessage):
1628 """ Emits a warning. """
1629 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1630
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps memory-accessing IEM_MC_XXX statement names to an operand-size based
    ## sub-name fragment (e.g. '__mem32').  analyzeAndAnnotateName picks the
    ## alphabetically last hit, giving the priority: STORE, MEM_MAP, FETCH.
    kdAnnotateNameMemStmts = {
        'IEM_MC_FETCH_MEM16_U8':                '__mem8',
        'IEM_MC_FETCH_MEM32_U8':                '__mem8',
        'IEM_MC_FETCH_MEM_D80':                 '__mem80',
        'IEM_MC_FETCH_MEM_I16':                 '__mem16',
        'IEM_MC_FETCH_MEM_I32':                 '__mem32',
        'IEM_MC_FETCH_MEM_I64':                 '__mem64',
        'IEM_MC_FETCH_MEM_R32':                 '__mem32',
        'IEM_MC_FETCH_MEM_R64':                 '__mem64',
        'IEM_MC_FETCH_MEM_R80':                 '__mem80',
        'IEM_MC_FETCH_MEM_U128':                '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':      '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC':          '__mem128',
        'IEM_MC_FETCH_MEM_U16':                 '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP':            '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32':          '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64':          '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32':          '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64':          '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256':                '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':      '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC':          '__mem256',
        'IEM_MC_FETCH_MEM_U32':                 '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP':            '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64':          '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64':          '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64':                 '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':      '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP':            '__mem64',
        'IEM_MC_FETCH_MEM_U8':                  '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP':             '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16':           '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32':           '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64':           '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16':           '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32':           '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64':           '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM':                 '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':       '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC':           '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32':             '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64':             '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':  '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        'IEM_MC_STORE_MEM_I16_CONST_BY_REF':    '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF':    '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF':    '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF':     '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF':    '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_U128':                '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':      '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC':          '__mem128',
        'IEM_MC_STORE_MEM_U16':                 '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST':           '__mem16c',
        'IEM_MC_STORE_MEM_U256':                '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':      '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC':          '__mem256',
        'IEM_MC_STORE_MEM_U32':                 '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST':           '__mem32c',
        'IEM_MC_STORE_MEM_U64':                 '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST':           '__mem64c',
        'IEM_MC_STORE_MEM_U8':                  '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST':            '__mem8c',

        'IEM_MC_MEM_MAP_D80_WO':                '__mem80',
        'IEM_MC_MEM_MAP_I16_WO':                '__mem16',
        'IEM_MC_MEM_MAP_I32_WO':                '__mem32',
        'IEM_MC_MEM_MAP_I64_WO':                '__mem64',
        'IEM_MC_MEM_MAP_R32_WO':                '__mem32',
        'IEM_MC_MEM_MAP_R64_WO':                '__mem64',
        'IEM_MC_MEM_MAP_R80_WO':                '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC':           '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO':               '__mem128',
        'IEM_MC_MEM_MAP_U128_RW':               '__mem128',
        'IEM_MC_MEM_MAP_U128_WO':               '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC':            '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO':                '__mem16',
        'IEM_MC_MEM_MAP_U16_RW':                '__mem16',
        'IEM_MC_MEM_MAP_U16_WO':                '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC':            '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO':                '__mem32',
        'IEM_MC_MEM_MAP_U32_RW':                '__mem32',
        'IEM_MC_MEM_MAP_U32_WO':                '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC':            '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO':                '__mem64',
        'IEM_MC_MEM_MAP_U64_RW':                '__mem64',
        'IEM_MC_MEM_MAP_U64_WO':                '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC':             '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO':                 '__mem8',
        'IEM_MC_MEM_MAP_U8_RW':                 '__mem8',
        'IEM_MC_MEM_MAP_U8_WO':                 '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Maps register-accessing IEM_MC_XXX statement names to an operand-size based
    ## sub-name fragment (e.g. '__greg32'); consulted only when no memory statement
    ## from kdAnnotateNameMemStmts was found in the block.
    kdAnnotateNameRegStmts = {
        'IEM_MC_FETCH_GREG_U8':                 '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16':          '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32':          '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64':          '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16':          '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32':          '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64':          '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16':                '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32':         '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64':         '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32':         '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64':         '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32':                '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64':         '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64':         '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64':                '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64':         '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32':           '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64':           '__greg64',

        'IEM_MC_STORE_GREG_U8':                 '__greg8',
        'IEM_MC_STORE_GREG_U16':                '__greg16',
        'IEM_MC_STORE_GREG_U32':                '__greg32',
        'IEM_MC_STORE_GREG_U64':                '__greg64',
        'IEM_MC_STORE_GREG_I64':                '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST':           '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST':          '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST':          '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST':          '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32':           '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64':           '__greg64',

        'IEM_MC_FETCH_SREG_U16':                '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32':             '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64':             '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64':           '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32':           '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64':           '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32':           '__sbase32',

        'IEM_MC_REF_GREG_U8':                   '__greg8',
        'IEM_MC_REF_GREG_U16':                  '__greg16',
        'IEM_MC_REF_GREG_U32':                  '__greg32',
        'IEM_MC_REF_GREG_U64':                  '__greg64',
        'IEM_MC_REF_GREG_U8_CONST':             '__greg8',
        'IEM_MC_REF_GREG_U16_CONST':            '__greg16',
        'IEM_MC_REF_GREG_U32_CONST':            '__greg32',
        'IEM_MC_REF_GREG_U64_CONST':            '__greg64',
        'IEM_MC_REF_GREG_I32':                  '__greg32',
        'IEM_MC_REF_GREG_I64':                  '__greg64',
        'IEM_MC_REF_GREG_I32_CONST':            '__greg32',
        'IEM_MC_REF_GREG_I64_CONST':            '__greg64',

        'IEM_MC_STORE_FPUREG_R80_SRC_REF':      '__fpu',
        'IEM_MC_REF_FPUREG':                    '__fpu',

        'IEM_MC_FETCH_MREG_U64':                '__mreg64',
        'IEM_MC_FETCH_MREG_U32':                '__mreg32',
        'IEM_MC_FETCH_MREG_U16':                '__mreg16',
        'IEM_MC_FETCH_MREG_U8':                 '__mreg8',
        'IEM_MC_STORE_MREG_U64':                '__mreg64',
        'IEM_MC_STORE_MREG_U32':                '__mreg32',
        'IEM_MC_STORE_MREG_U16':                '__mreg16',
        'IEM_MC_STORE_MREG_U8':                 '__mreg8',
        'IEM_MC_STORE_MREG_U32_ZX_U64':         '__mreg32zx64',
        'IEM_MC_REF_MREG_U64':                  '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST':            '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST':            '__mreg32',

        'IEM_MC_CLEAR_XREG_U32_MASK':           '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128':               '__xreg128',
        'IEM_MC_FETCH_XREG_XMM':                '__xreg128',
        'IEM_MC_FETCH_XREG_U64':                '__xreg64',
        'IEM_MC_FETCH_XREG_U32':                '__xreg32',
        'IEM_MC_FETCH_XREG_U16':                '__xreg16',
        'IEM_MC_FETCH_XREG_U8':                 '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128':          '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM':           '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128':           '__xreg32',
        'IEM_MC_STORE_XREG_U128':               '__xreg128',
        'IEM_MC_STORE_XREG_XMM':                '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32':            '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64':            '__xreg64',
        'IEM_MC_STORE_XREG_U64':                '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128':        '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32':                '__xreg32',
        'IEM_MC_STORE_XREG_U16':                '__xreg16',
        'IEM_MC_STORE_XREG_U8':                 '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128':        '__xreg32zx128',
        'IEM_MC_STORE_XREG_R32':                '__xreg32',
        'IEM_MC_STORE_XREG_R64':                '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX':    '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX':   '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX':   '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX':   '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX':  '__xreg128zx',
        'IEM_MC_REF_XREG_U128':                 '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST':           '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST':            '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST':            '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST':            '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST':            '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST':            '__xreg128',
        'IEM_MC_COPY_XREG_U128':                '__xreg128',

        'IEM_MC_FETCH_YREG_U256':               '__yreg256',
        'IEM_MC_FETCH_YREG_YMM':                '__yreg256',
        'IEM_MC_FETCH_YREG_U128':               '__yreg128',
        'IEM_MC_FETCH_YREG_U64':                '__yreg64',
        'IEM_MC_FETCH_YREG_U32':                '__yreg32',
        'IEM_MC_STORE_YREG_U128':               '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX':       '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX':       '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX':      '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX':      '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX':    '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX':   '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX':   '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX':   '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX':  '__yreg128',
        'IEM_MC_REF_YREG_U128':                 '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST':           '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST':            '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX':       '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX':       '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX':        '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX':   '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX':   '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX':   '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX':   '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
    ## Used by analyzeAndAnnotateName for annotating blocks by call type when no
    ## operand-size clue was found, and for appending call-type info to the
    ## operand-based name.
    kdAnnotateNameCallStmts = {
        'IEM_MC_CALL_CIMPL_0':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_1':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_2':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_3':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_4':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_5':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_6':                  '__cimpl',
        'IEM_MC_CALL_CIMPL_7':                  '__cimpl',
        'IEM_MC_DEFER_TO_CIMPL_0_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_1_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_2_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_3_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_4_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_5_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_6_RET':          '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_7_RET':          '__cimpl_defer',
        'IEM_MC_CALL_VOID_AIMPL_0':             '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_1':             '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_2':             '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_3':             '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_4':             '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_5':             '__aimpl',
        'IEM_MC_CALL_AIMPL_0':                  '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_1':                  '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_2':                  '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_3':                  '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_4':                  '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_5':                  '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_6':                  '__aimpl_ret',
        # NOTE(review): Unlike VOID_AIMPL_0..5 ('__aimpl') this maps to
        # '__aimpl_fpu' and is grouped with the FPU calls - confirm intentional.
        'IEM_MC_CALL_VOID_AIMPL_6':             '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_0':              '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_1':              '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_2':              '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_3':              '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_4':              '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_5':              '__aimpl_fpu',
        'IEM_MC_CALL_MMX_AIMPL_0':              '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_1':              '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_2':              '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_3':              '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_4':              '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_5':              '__aimpl_mmx',
        'IEM_MC_CALL_SSE_AIMPL_0':              '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_1':              '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_2':              '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_3':              '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_4':              '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_5':              '__aimpl_sse',
        'IEM_MC_CALL_AVX_AIMPL_0':              '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_1':              '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_2':              '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_3':              '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_4':              '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_5':              '__aimpl_avx',
    };
1926 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1927 """
1928 Scans the statements and variation lists for clues about the threaded function,
1929 and sets self.sSubName if successfull.
1930 """
1931 # Operand base naming:
1932 dHits = {};
1933 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1934 if cHits > 0:
1935 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1936 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1937 else:
1938 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1939 if cHits > 0:
1940 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1941 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1942 else:
1943 # No op details, try name it by call type...
1944 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1945 if cHits > 0:
1946 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1947 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1948 return;
1949
1950 # Add call info if any:
1951 dHits = {};
1952 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1953 if cHits > 0:
1954 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1955 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1956
1957 self.sSubName = sName;
1958 return;
1959
1960 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1961 """ Scans the statements for MC variables and call arguments. """
1962 for oStmt in aoStmts:
1963 if isinstance(oStmt, iai.McStmtVar):
1964 if oStmt.sVarName in self.dVariables:
1965 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1966 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1967 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
1968 if oStmt.asParams[1] in self.dVariables:
1969 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
1970 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
1971 oStmt.asParams[0], oStmt.asParams[1]);
1972
1973 # There shouldn't be any variables or arguments declared inside if/
1974 # else blocks, but scan them too to be on the safe side.
1975 if isinstance(oStmt, iai.McStmtCond):
1976 #cBefore = len(self.dVariables);
1977 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1978 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1979 #if len(self.dVariables) != cBefore:
1980 # raise Exception('Variables/arguments defined in conditional branches!');
1981 return True;
1982
    ## Maps the IEM_MC_XXX_AND_FINISH / IEM_MC_DEFER_TO_CIMPL_X_RET statement
    ## names onto the g_ksFinishAnnotation_XXX classification returned by
    ## analyzeCodeOperation.
    kdReturnStmtAnnotations = {
        'IEM_MC_ADVANCE_RIP_AND_FINISH':    g_ksFinishAnnotation_Advance,
        'IEM_MC_REL_JMP_S8_AND_FINISH':     g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S16_AND_FINISH':    g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S32_AND_FINISH':    g_ksFinishAnnotation_RelJmp,
        'IEM_MC_SET_RIP_U16_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U32_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U64_AND_FINISH':    g_ksFinishAnnotation_SetJmp,
        'IEM_MC_REL_CALL_S16_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S32_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S64_AND_FINISH':   g_ksFinishAnnotation_RelCall,
        'IEM_MC_IND_CALL_U16_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U32_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U64_AND_FINISH':   g_ksFinishAnnotation_IndCall,
        'IEM_MC_DEFER_TO_CIMPL_0_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_1_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_2_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_3_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_4_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_5_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_6_RET':      g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_7_RET':      g_ksFinishAnnotation_DeferToCImpl,
    };
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        Collects EFLAGS-related statements in dEflStmts (statement name -> oStmt)
        for the caller to cross-check against @opflmodify and friends.
        fSeenConditional is True when recursing into a conditional branch.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        Returns annotation on return style.
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH_XXXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
            elif oStmt.sName.startswith('IEM_MC_IND_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_CALL'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
            elif oStmt.sName.startswith('IEM_MC_RETN'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.
            if oStmt.sName in self.kdReturnStmtAnnotations:
                assert sAnnotation is None;
                sAnnotation = self.kdReturnStmtAnnotations[oStmt.sName];

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
                               'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls advertising RFLAGS/STATUS_FLAGS also count as EFLAGS users.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
2083
2084 def analyzeThreadedFunction(self, oGenerator):
2085 """
2086 Analyzes the code, identifying the number of parameters it requires and such.
2087
2088 Returns dummy True - raises exception on trouble.
2089 """
2090
2091 #
2092 # Decode the block into a list/tree of McStmt objects.
2093 #
2094 aoStmts = self.oMcBlock.decode();
2095
2096 #
2097 # Check the block for errors before we proceed (will decode it).
2098 #
2099 asErrors = self.oMcBlock.check();
2100 if asErrors:
2101 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
2102 for sError in asErrors]));
2103
2104 #
2105 # Scan the statements for local variables and call arguments (self.dVariables).
2106 #
2107 self.analyzeFindVariablesAndCallArgs(aoStmts);
2108
2109 #
2110 # Scan the code for IEM_CIMPL_F_ and other clues.
2111 #
2112 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
2113 dEflStmts = {};
2114 self.analyzeCodeOperation(aoStmts, dEflStmts);
2115 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
2116 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
2117 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
2118 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
2119 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);
2120
2121 #
2122 # Analyse EFLAGS related MCs and @opflmodify and friends.
2123 #
2124 if dEflStmts:
2125 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
2126 if ( oInstruction is None
2127 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
2128 sMcNames = '+'.join(dEflStmts.keys());
2129 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
2130 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
2131 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
2132 if not oInstruction.asFlModify:
2133 if oInstruction.sMnemonic not in [ 'not', ]:
2134 self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
2135 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
2136 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
2137 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
2138 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
2139 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
2140 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
2141 if not oInstruction.asFlModify:
2142 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
2143 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
2144 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
2145 if not oInstruction.asFlTest:
2146 if oInstruction.sMnemonic not in [ 'not', ]:
2147 self.error('Expected @opfltest!', oGenerator);
2148 if oInstruction and oInstruction.asFlSet:
2149 for sFlag in oInstruction.asFlSet:
2150 if sFlag not in oInstruction.asFlModify:
2151 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2152 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2153 if oInstruction and oInstruction.asFlClear:
2154 for sFlag in oInstruction.asFlClear:
2155 if sFlag not in oInstruction.asFlModify:
2156 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2157 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2158
2159 #
2160 # Create variations as needed.
2161 #
2162 if iai.McStmt.findStmtByNames(aoStmts,
2163 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2164 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2165 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2166 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2167 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2168
2169 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2170 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2171 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2172 'IEM_MC_FETCH_MEM_U32' : True,
2173 'IEM_MC_FETCH_MEM_U64' : True,
2174 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2175 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2176 'IEM_MC_STORE_MEM_U32' : True,
2177 'IEM_MC_STORE_MEM_U64' : True, }):
2178 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2179 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2180 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2181 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2182 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2183 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2184 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2185 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2186 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2187 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2188 else:
2189 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2190 else:
2191 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2192 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2193 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2194 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2195 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2196 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2197 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2198 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2199 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2200 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2201 else:
2202 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2203
2204 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2205 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2206 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2207 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2208 asVariationsBase = asVariations;
2209 asVariations = [];
2210 for sVariation in asVariationsBase:
2211 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2212 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
2213
2214 if not iai.McStmt.findStmtByNames(aoStmts,
2215 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2216 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2217 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2218 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2219 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2220 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2221 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2222 'IEM_MC_REL_CALL_S16_AND_FINISH': True,
2223 'IEM_MC_REL_CALL_S32_AND_FINISH': True,
2224 'IEM_MC_REL_CALL_S64_AND_FINISH': True,
2225 'IEM_MC_IND_CALL_U16_AND_FINISH': True,
2226 'IEM_MC_IND_CALL_U32_AND_FINISH': True,
2227 'IEM_MC_IND_CALL_U64_AND_FINISH': True,
2228 'IEM_MC_RETN_AND_FINISH': True,
2229 }):
2230 asVariations = [sVariation for sVariation in asVariations
2231 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2232
2233 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2234
2235 # Dictionary variant of the list.
2236 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2237
2238 #
2239 # Try annotate the threaded function name.
2240 #
2241 self.analyzeAndAnnotateName(aoStmts);
2242
2243 #
2244 # Continue the analysis on each variation.
2245 #
2246 for oVariation in self.aoVariations:
2247 oVariation.analyzeVariation(aoStmts);
2248
2249 return True;
2250
    ## Used by emitThreadedCallStmts.
    ## Variations whose selection depends on runtime prefix state (effective
    ## address-size override or a non-flat segment prefix), i.e. the generated
    ## switch must fold extra bits (8 / 16) into the switch value for these.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs:    True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs:   True,
        ThreadedFunctionVariation.ksVariation_32_Addr16:  True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat:    True,
        ThreadedFunctionVariation.ksVariation_32f_Flat:   True,
        ThreadedFunctionVariation.ksVariation_16_Addr32:  True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2264
    def emitThreadedCallStmts(self, sBranch = None, fTbLookupTable = False): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmtsForVariant(0, fTbLookupTable);

        #
        # Case statement sub-class.
        #
        dByVari = self.dVariations;
        #fDbg   = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            """ One 'case' label (plus optional body) of the generated C switch. """
            def __init__(self, sCond, sVarNm = None):
                self.sCond  = sCond;
                self.sVarNm = sVarNm;
                # sVarNm is None for pure fall-thru cases (no body emitted).
                self.oVar   = dByVari[sVarNm] if sVarNm else None;
                self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;

            def toCode(self):
                """ Renders the case as 'case X: <body> break;' statements. """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                """ Renders the case as just an enmFunction assignment (used when all bodies are identical). """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                """
                Checks if the bodies are the same, disregarding the function enum
                passed to IEM_MC2_EMIT_CALL_* (that difference is factored out by
                toFunctionAssignment).  A body-less (fall-thru) case matches anything.
                """
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond):   # defensive; unreachable after the assert above.
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        # Parameter #1 of an IEM_MC2_EMIT_CALL_* is the function index and
                        # is allowed to differ (it becomes the enmFunction variable).
                        if (    sParam != oThatStmt.asParams[iParam]
                            and (   iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        # Extra bits folded into the switch value:
        #   8  - effective address mode differs from the CPU mode (address-size prefix),
        #   16 - effective segment is FS/GS/SS-above or CS (non-flat access path),
        #   32 - eflags checking/clearing variation needed.
        #
        fSimple       = True;
        sSwitchValue  = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple       = False;                                              # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT',       ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16',  ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8',   ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32',       ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16',  ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8',   ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_64BIT',
                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32',
                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',      ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16',  None), # fall thru
                Case('IEMMODE_32BIT',       ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8',  None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8',   ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',      ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16',  None), # fall thru
                    Case('IEMMODE_32BIT | 32',       ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8',  None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8',   ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',       ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',       ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16',  None), # fall thru
                Case('IEMMODE_16BIT',       ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8',   ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16',  None), # fall thru
                    Case('IEMMODE_16BIT | 32',       ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8',   ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari:  # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
        if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # All identical: switch only selects the function enum, single shared body follows.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmtsForVariant(0, fTbLookupTable,
                                                                                                       'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
2530
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Patch the IEM_MC_BEGIN flags parameter to include the collected CIMPL flags.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or precede.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # A raise/finish/CIMPL statement: insert the threaded call just before it.
                    if (   oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        else:
                            # Conditional branch, jump-taken path: must be one of the relative jump finishers.
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH':  True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH':  1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                # C++ decoder-helper invocation: emit the threaded call right after it.
                elif (    not fIsConditional
                      and oStmt.fDecode
                      and (   oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                           fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                 fCallEmitted, cDepth + 1,
                                                                                 oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # Only consider the call emitted if both branches emitted one.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2616
2617
2618 def generateInputCode(self):
2619 """
2620 Modifies the input code.
2621 """
2622 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2623
2624 if len(self.oMcBlock.aoStmts) == 1:
2625 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2626 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2627 if self.dsCImplFlags:
2628 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2629 else:
2630 sCode += '0;\n';
2631 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2632 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2633 sIndent = ' ' * (min(cchIndent, 2) - 2);
2634 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2635 return sCode;
2636
2637 # IEM_MC_BEGIN/END block
2638 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2639 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2640 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2641 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2642 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2643
# Short alias for ThreadedFunctionVariation.
# (Keeps the many ksVariation_* references above/below within line-length limits.)
ThrdFnVar = ThreadedFunctionVariation;
2646
2647
2648class IEMThreadedGenerator(object):
2649 """
2650 The threaded code generator & annotator.
2651 """
2652
2653 def __init__(self):
2654 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2655 self.oOptions = None # type: argparse.Namespace
2656 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2657 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2658 self.cErrors = 0;
2659
2660 #
2661 # Error reporting.
2662 #
2663
2664 def rawError(self, sCompleteMessage):
2665 """ Output a raw error and increment the error counter. """
2666 print(sCompleteMessage, file = sys.stderr);
2667 self.cErrors += 1;
2668 return False;
2669
2670 #
2671 # Processing.
2672 #
2673
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the instruction files, creates + analyzes the threaded functions,
        settles name suffixes, and gathers statistics.  Returns True on success,
        False if any errors were reported (details printed to stderr).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions.
        dRawParamCounts = {};   # histogram: parameter count -> number of variations (unoptimized).
        dMinParamCounts = {};   # histogram: parameter count -> number of variations (optimized).
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyzeThreadedFunction(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams]      = dMinParamCounts.get(oVariation.cMinParams,      0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # Functions sharing an oFunction with colliding sub-names get a '_<n>' appended.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames       = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while (    iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            # More blocks than distinct sub-names => duplicates; disambiguate by index.
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions.  This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction         = 0;
        oThreadedFunction         = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions   = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction  = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs         = 0;
        cMaxVars         = 0;
        cMaxVarsAndArgs  = 0;
        cbMaxArgs        = 0;
        cbMaxVars        = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
                # Counts.
                cMaxVars        = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
                cMaxArgs        = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
                cMaxVarsAndArgs = max(cMaxVarsAndArgs,
                                      len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
                # Calc stack allocation size:
                # Each var/arg is rounded up to a multiple of 8 bytes.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars        = max(cbMaxVars, cbVars);
                cbMaxArgs        = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2773
2774 #
2775 # Output
2776 #
2777
2778 def generateLicenseHeader(self):
2779 """
2780 Returns the lines for a license header.
2781 """
2782 return [
2783 '/*',
2784 ' * Autogenerated by $Id: IEMAllThrdPython.py 105283 2024-07-11 20:26:27Z vboxsync $ ',
2785 ' * Do not edit!',
2786 ' */',
2787 '',
2788 '/*',
2789 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2790 ' *',
2791 ' * This file is part of VirtualBox base platform packages, as',
2792 ' * available from https://www.virtualbox.org.',
2793 ' *',
2794 ' * This program is free software; you can redistribute it and/or',
2795 ' * modify it under the terms of the GNU General Public License',
2796 ' * as published by the Free Software Foundation, in version 3 of the',
2797 ' * License.',
2798 ' *',
2799 ' * This program is distributed in the hope that it will be useful, but',
2800 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2801 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2802 ' * General Public License for more details.',
2803 ' *',
2804 ' * You should have received a copy of the GNU General Public License',
2805 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2806 ' *',
2807 ' * The contents of this file may alternatively be used under the terms',
2808 ' * of the Common Development and Distribution License Version 1.0',
2809 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2810 ' * in the VirtualBox distribution, in which case the provisions of the',
2811 ' * CDDL are applicable instead of those of the GPL.',
2812 ' *',
2813 ' * You may elect to license modified versions of this file under the',
2814 ' * terms and conditions of either the GPL or the CDDL or both.',
2815 ' *',
2816 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2817 ' */',
2818 '',
2819 '',
2820 '',
2821 ];
2822
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry: (sFuncNm, cUserArgs, fNativeRecompilerImpl).
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
    );
2859
2860 def generateThreadedFunctionsHeader(self, oOut, _):
2861 """
2862 Generates the threaded functions header file.
2863 Returns success indicator.
2864 """
2865
2866 asLines = self.generateLicenseHeader();
2867
2868 # Generate the threaded function table indexes.
2869 asLines += [
2870 'typedef enum IEMTHREADEDFUNCS',
2871 '{',
2872 ' kIemThreadedFunc_Invalid = 0,',
2873 '',
2874 ' /*',
2875 ' * Predefined',
2876 ' */',
2877 ];
2878 asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];
2879
2880 iThreadedFunction = 1 + len(self.katBltIns);
2881 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
2882 asLines += [
2883 '',
2884 ' /*',
2885 ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
2886 ' */',
2887 ];
2888 for oThreadedFunction in self.aoThreadedFuncs:
2889 oVariation = oThreadedFunction.dVariations.get(sVariation, None);
2890 if oVariation:
2891 iThreadedFunction += 1;
2892 oVariation.iEnumValue = iThreadedFunction;
2893 asLines.append(' ' + oVariation.getIndexName() + ',');
2894 asLines += [
2895 ' kIemThreadedFunc_End',
2896 '} IEMTHREADEDFUNCS;',
2897 '',
2898 ];
2899
2900 # Prototype the function table.
2901 asLines += [
2902 'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
2903 'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
2904 '#if defined(IN_RING3) || defined(LOG_ENABLED)',
2905 'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
2906 '#endif',
2907 '#if defined(IN_RING3)',
2908 'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
2909 '#endif',
2910 ];
2911
2912 oOut.write('\n'.join(asLines));
2913 return True;
2914
    ## Maps a non-pointer parameter's bit width to the UINT64_C mask literal
    ## used by generateFunctionParameterUnpacking when extracting the value
    ## from a packed 64-bit threaded-call parameter.
    ksBitsToIntMask = {
        1:  "UINT64_C(0x1)",
        2:  "UINT64_C(0x3)",
        4:  "UINT64_C(0xf)",
        8:  "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2923
2924 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
2925 """
2926 Outputs code for unpacking parameters.
2927 This is shared by the threaded and native code generators.
2928 """
2929 aasVars = [];
2930 for aoRefs in oVariation.dParamRefs.values():
2931 oRef = aoRefs[0];
2932 if oRef.sType[0] != 'P':
2933 cBits = g_kdTypeInfo[oRef.sType][0];
2934 sType = g_kdTypeInfo[oRef.sType][2];
2935 else:
2936 cBits = 64;
2937 sType = oRef.sType;
2938
2939 sTypeDecl = sType + ' const';
2940
2941 if cBits == 64:
2942 assert oRef.offNewParam == 0;
2943 if sType == 'uint64_t':
2944 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2945 else:
2946 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2947 elif oRef.offNewParam == 0:
2948 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2949 else:
2950 sUnpack = '(%s)((%s >> %s) & %s);' \
2951 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2952
2953 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2954
2955 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2956 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2957 acchVars = [0, 0, 0, 0, 0];
2958 for asVar in aasVars:
2959 for iCol, sStr in enumerate(asVar):
2960 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2961 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2962 for asVar in sorted(aasVars):
2963 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2964
2965 if uNoRefLevel > 0 and aasVars:
2966 if uNoRefLevel > 1:
2967 # level 2: Everything. This is used by liveness.
2968 oOut.write(' ');
2969 for asVar in sorted(aasVars):
2970 oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
2971 oOut.write('\n');
2972 else:
2973 # level 1: Only pfnXxxx variables. This is used by native.
2974 for asVar in sorted(aasVars):
2975 if asVar[2].startswith('pfn'):
2976 oOut.write(' RT_NOREF_PV(%s);\n' % (asVar[2],));
2977 return True;
2978
2979 kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut, _):
        """
        Generates the threaded functions source file.

        Emits one static IEM_DECL_IEMTHREADEDFUNC_DEF function per threaded
        variation, followed by the function pointer, argument count, name and
        statistics tables (the latter two conditionally compiled).

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130-column banner separating the variations.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write('    RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        # All four tables are indexed by IEMTHREADEDFUNCS, so entries must be
        # appended in lockstep.
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            '    0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            '    "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            '    NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        for asTable in aasTables:
            asTable.extend((
                '',
                '    /*',
                '     * Predefined.',
                '     */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append('    iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append('    %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append('    "BltIn_%s",' % (sFuncNm,));
            asStatTable.append('    "BltIn/%s",' % (sFuncNm,));

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    '    /*',
                    '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    '     */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Enum values were assigned by generateThreadedFunctionsHeader
                    # using the same iteration order; verify they still match.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append('    /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append('    /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append('    /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append('    "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        oOut.write(  '\n'
                   + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write(  '\n'
                   + '\n'
                   + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write(  '\n'
                   + '\n'
                   + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                   + '\n'
                   + '\n'
                   + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write(  '\n'
                   + '#endif /* IN_RING3 */\n');

        return True;
3129
3130 def generateNativeFunctionsHeader(self, oOut, _):
3131 """
3132 Generates the native recompiler functions header file.
3133 Returns success indicator.
3134 """
3135 if not self.oOptions.fNativeRecompilerEnabled:
3136 return True;
3137
3138 asLines = self.generateLicenseHeader();
3139
3140 # Prototype the function table.
3141 asLines += [
3142 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3143 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3144 '',
3145 ];
3146
3147 # Emit indicators as to which of the builtin functions have a native
3148 # recompiler function and which not. (We only really need this for
3149 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3150 for atBltIn in self.katBltIns:
3151 if atBltIn[1]:
3152 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3153 else:
3154 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3155
3156 # Emit prototypes for the builtin functions we use in tables.
3157 asLines += [
3158 '',
3159 '/* Prototypes for built-in functions used in the above tables. */',
3160 ];
3161 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3162 if fHaveRecompFunc:
3163 asLines += [
3164 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3165 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3166 ];
3167
3168 # Emit prototypes for table function.
3169 asLines += [
3170 '',
3171 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3172 ]
3173 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3174 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3175 asLines += [
3176 '',
3177 '/* Variation: ' + sVarName + ' */',
3178 ];
3179 for oThreadedFunction in self.aoThreadedFuncs:
3180 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3181 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3182 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3183 asLines += [
3184 '',
3185 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3186 ]
3187
3188 oOut.write('\n'.join(asLines));
3189 return True;
3190
    def generateNativeFunctionsSource(self, oOut, idxPart):
        """
        Generates the native recompiler functions source file.

        The output is split into cParts files; idxPart (0-based) selects which
        slice of the variation list this invocation emits.  The function
        pointer table is only emitted into part 0.

        Returns success indicator.
        """
        cParts = 4;
        assert(idxPart in range(cParts));
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
        idxFirstVar = idxPart * cVariationsPerFile;
        idxEndVar = idxFirstVar + cVariationsPerFile;
        # The last part picks up any remainder from the integer division.
        if idxPart + 1 >= cParts:
            idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130-column banner separating the variations.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 1);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table if this is the first file.
        #
        if idxPart == 0:
            oOut.write(  '\n'
                       + '\n'
                       + '/*\n'
                       + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                       + ' */\n'
                       + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                       + '{\n'
                       + '    /*Invalid*/ NULL,'
                       + '\n'
                       + '    /*\n'
                       + '     * Predefined.\n'
                       + '     */\n'
                       );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write('    iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write(  '    /*\n'
                           + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                           + '     */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        # Keep the table in sync with the enum emitted earlier.
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getNativeFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write(  '};\n');

        oOut.write('\n');
        return True;
3301
    def generateNativeLivenessSource(self, oOut, _):
        """
        Generates the native recompiler liveness analysis functions source file.

        Mirrors generateNativeFunctionsSource, but emits the static liveness
        functions (uNoRefLevel = 2, all unpacked parameters RT_NOREF'ed) and
        always emits the g_apfnIemNativeLivenessFunctions table.

        Returns success indicator.
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # 130-column banner separating the variations.
            oOut.write(  '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '*   Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write(  '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 2);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write(  '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + '    /*Invalid*/ NULL,'
                   + '\n'
                   + '    /*\n'
                   + '     * Predefined.\n'
                   + '     */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write('    iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write('    NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write(  '    /*\n'
                       + '     * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + '     */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Keep the table in sync with the enum emitted earlier.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write('    /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write('    /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write(  '};\n'
                   + '\n');
        return True;
3398
3399
3400 def getThreadedFunctionByIndex(self, idx):
3401 """
3402 Returns a ThreadedFunction object for the given index. If the index is
3403 out of bounds, a dummy is returned.
3404 """
3405 if idx < len(self.aoThreadedFuncs):
3406 return self.aoThreadedFuncs[idx];
3407 return ThreadedFunction.dummyInstance();
3408
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies each input file through while replacing every IEM_MC block with
        the corresponding threaded-call code; idxFile (1-based) selects which
        subset of input files goes into this output file.

        Returns success indicator.
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  (aoInfo[2] == -1 means the
            # file belongs to every set.)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the file line by line, tracking the next MC block to replace.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if (   iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert (   (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    # Keep the text before the block, emit the replacement code,
                    # then skip forward to the block's last line and keep the
                    # text following it.
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert (   sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert (   sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3499
3500
3501 #
3502 # Main
3503 #
3504
    def main(self, asArgs):
        """
        C-like main function.

        Parses the command line, processes the input instruction files and
        generates the selected output files ('-' meaning stdout).

        Returns exit code.
        """

        #
        # Parse arguments
        #
        sScriptDir = os.path.dirname(__file__);
        oParser = argparse.ArgumentParser(add_help = False);
        oParser.add_argument('asInFiles',
                             metavar = 'input.cpp.h',
                             nargs = '*',
                             default = [os.path.join(sScriptDir, aoInfo[0])
                                        for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
                             help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
        oParser.add_argument('--host-arch',
                             metavar = 'arch',
                             dest = 'sHostArch',
                             action = 'store',
                             default = None,
                             help = 'The host architecture.');

        oParser.add_argument('--out-thrd-funcs-hdr',
                             metavar = 'file-thrd-funcs.h',
                             dest = 'sOutFileThrdFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the threaded functions.');
        oParser.add_argument('--out-thrd-funcs-cpp',
                             metavar = 'file-thrd-funcs.cpp',
                             dest = 'sOutFileThrdFuncsCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the threaded functions.');
        oParser.add_argument('--out-n8ve-funcs-hdr',
                             metavar = 'file-n8tv-funcs.h',
                             dest = 'sOutFileN8veFuncsHdr',
                             action = 'store',
                             default = '-',
                             help = 'The output header file for the native recompiler functions.');
        oParser.add_argument('--out-n8ve-funcs-cpp1',
                             metavar = 'file-n8tv-funcs1.cpp',
                             dest = 'sOutFileN8veFuncsCpp1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 1.');
        oParser.add_argument('--out-n8ve-funcs-cpp2',
                             metavar = 'file-n8ve-funcs2.cpp',
                             dest = 'sOutFileN8veFuncsCpp2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 2.');
        oParser.add_argument('--out-n8ve-funcs-cpp3',
                             metavar = 'file-n8ve-funcs3.cpp',
                             dest = 'sOutFileN8veFuncsCpp3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 3.');
        oParser.add_argument('--out-n8ve-funcs-cpp4',
                             metavar = 'file-n8ve-funcs4.cpp',
                             dest = 'sOutFileN8veFuncsCpp4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler functions part 4.');
        oParser.add_argument('--out-n8ve-liveness-cpp',
                             metavar = 'file-n8ve-liveness.cpp',
                             dest = 'sOutFileN8veLivenessCpp',
                             action = 'store',
                             default = '-',
                             help = 'The output C++ file for the native recompiler liveness analysis functions.');
        oParser.add_argument('--native',
                             dest = 'fNativeRecompilerEnabled',
                             action = 'store_true',
                             default = False,
                             help = 'Enables generating the files related to native recompilation.');
        oParser.add_argument('--out-mod-input1',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput1',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 1.');
        oParser.add_argument('--out-mod-input2',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput2',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 2.');
        oParser.add_argument('--out-mod-input3',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput3',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 3.');
        oParser.add_argument('--out-mod-input4',
                             metavar = 'file-instr.cpp.h',
                             dest = 'sOutFileModInput4',
                             action = 'store',
                             default = '-',
                             help = 'The output C++/header file for modified input instruction files part 4.');
        oParser.add_argument('--help', '-h', '-?',
                             action = 'help',
                             help = 'Display help and exit.');
        oParser.add_argument('--version', '-V',
                             action = 'version',
                             version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
                                     % (__version__.split()[1], iai.__version__.split()[1],),
                             help = 'Displays the version/revision of the script and exit.');
        self.oOptions = oParser.parse_args(asArgs[1:]);
        # Diagnostic dump of the effective options (stderr so it doesn't mix
        # with generated output going to stdout).
        print("oOptions=%s" % (self.oOptions,), file = sys.stderr);

        if self.oOptions.sHostArch not in ('amd64', 'arm64'):
            print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
            return 1;

        #
        # Process the instructions specified in the IEM sources.
        #
        if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
            #
            # Generate the output files.
            # Note: the native-funcs generators take 0-based part numbers,
            #       while generateModifiedInput takes 1-based file-set indexes.
            #
            aaoOutputFiles = (
                 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
                 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
                 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
                 ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
                 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
                 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
                 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
                 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
                 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
            );
            fRc = True;
            for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
                if sOutFile == '-':
                    fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
                else:
                    try:
                        oOut = open(sOutFile, 'w');                 # pylint: disable=consider-using-with,unspecified-encoding
                    except Exception as oXcpt:
                        print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
                        return 1;
                    fRc = fnGenMethod(oOut, iPartNo) and fRc;
                    oOut.close();
            if fRc:
                return 0;

        return 1;
3658
3659
# Script entry point: run the generator on the process arguments and use its
# return value as the process exit code.
if __name__ == '__main__':
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3662
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette