VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

Last change on this file was 104424, checked in by vboxsync, 4 weeks ago

VMM/IEM: Convert near return (retn) and relative/indirect call instructions to special IEM MC statements in order to be able to recompile them, bugref:10376 [scm]

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
  • Property svn:keywords set to Author Date Id Revision
File size: 191.6 KB
Line 
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3# $Id: IEMAllThrdPython.py 104424 2024-04-24 14:49:27Z vboxsync $
4# pylint: disable=invalid-name
5
6"""
7Annotates and generates threaded functions from IEMAllInst*.cpp.h.
8"""
9
10from __future__ import print_function;
11
12__copyright__ = \
13"""
14Copyright (C) 2023 Oracle and/or its affiliates.
15
16This file is part of VirtualBox base platform packages, as
17available from https://www.virtualbox.org.
18
19This program is free software; you can redistribute it and/or
20modify it under the terms of the GNU General Public License
21as published by the Free Software Foundation, in version 3 of the
22License.
23
24This program is distributed in the hope that it will be useful, but
25WITHOUT ANY WARRANTY; without even the implied warranty of
26MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
27General Public License for more details.
28
29You should have received a copy of the GNU General Public License
30along with this program; if not, see <https://www.gnu.org/licenses>.
31
32SPDX-License-Identifier: GPL-3.0-only
33"""
34__version__ = "$Revision: 104424 $"
35
36# Standard python imports.
37import copy;
38import datetime;
39import os;
40import re;
41import sys;
42import argparse;
43from typing import Dict, List;
44
45import IEMAllInstPython as iai;
46import IEMAllN8vePython as ian;
47
48
# Python 3 hacks:
if sys.version_info[0] >= 3:
    long = int;     # pylint: disable=redefined-builtin,invalid-name

## Number of generic parameters for the thread functions.
g_kcThreadedParams = 3;

## Primitive parameter type info - maps type name to (cBits, fSigned, C-type).
g_kdTypeInfo = {
    # type name:    (cBits, fSigned, C-type      )
    'int8_t':       (    8,    True, 'int8_t',    ),
    'int16_t':      (   16,    True, 'int16_t',   ),
    'int32_t':      (   32,    True, 'int32_t',   ),
    'int64_t':      (   64,    True, 'int64_t',   ),
    'uint4_t':      (    4,   False, 'uint8_t',   ),
    'uint8_t':      (    8,   False, 'uint8_t',   ),
    'uint16_t':     (   16,   False, 'uint16_t',  ),
    'uint32_t':     (   32,   False, 'uint32_t',  ),
    'uint64_t':     (   64,   False, 'uint64_t',  ),
    'uintptr_t':    (   64,   False, 'uintptr_t', ),  # ASSUMES 64-bit host pointer size.
    'bool':         (    1,   False, 'bool',      ),
    'IEMMODE':      (    2,   False, 'IEMMODE',   ),
};

# Only for getTypeBitCount/variables.  Extends g_kdTypeInfo with larger
# aggregate/SIMD types (see the update() call below).
g_kdTypeInfo2 = {
    'RTFLOAT32U':        (       32, False, 'RTFLOAT32U',      ),
    'RTFLOAT64U':        (       64, False, 'RTFLOAT64U',      ),
    'RTUINT64U':         (       64, False, 'RTUINT64U',       ),
    'RTGCPTR':           (       64, False, 'RTGCPTR',         ),
    'RTPBCD80U':         (       80, False, 'RTPBCD80U',       ),
    'RTFLOAT80U':        (       80, False, 'RTFLOAT80U',      ),
    'IEMFPURESULT':      (    80+16, False, 'IEMFPURESULT',    ),
    'IEMFPURESULTTWO':   ( 80+16+80, False, 'IEMFPURESULTTWO', ),
    'RTUINT128U':        (      128, False, 'RTUINT128U',      ),
    'X86XMMREG':         (      128, False, 'X86XMMREG',       ),
    'IEMMEDIAF2XMMSRC':  (      256, False, 'IEMMEDIAF2XMMSRC',),
    'RTUINT256U':        (      256, False, 'RTUINT256U',      ),
    'IEMPCMPISTRXSRC':   (      256, False, 'IEMPCMPISTRXSRC', ),
    'IEMPCMPESTRXSRC':   (      384, False, 'IEMPCMPESTRXSRC', ),
}; #| g_kdTypeInfo; - requires 3.9
g_kdTypeInfo2.update(g_kdTypeInfo);   # Python < 3.9 compatible way of merging the two dicts.
90
def getTypeBitCount(sType):
    """
    Translate a type to size in bits.

    Unknown types are complained about on stdout (not fatal) and assumed
    to be 64 bits wide, same as pointers.
    """
    if sType in g_kdTypeInfo2:
        return g_kdTypeInfo2[sType][0];
    # Pointer types: anything containing '*' or with a hungarian 'P' prefix.
    # Use startswith rather than sType[0] so an empty string cannot raise IndexError.
    if '*' in sType or sType.startswith('P'):
        return 64;
    #raise Exception('Unknown type: %s' % (sType,));
    print('error: Unknown type: %s' % (sType,));
    return 64;
102
## Maps IEMCPU field names to a 1-tuple with the field's value type, or
## (None,) when the field must not be referenced by threaded functions.
g_kdIemFieldToType = {
    # Illegal ones:
    'offInstrNextByte':     ( None, ),
    'cbInstrBuf':           ( None, ),
    'pbInstrBuf':           ( None, ),
    'uInstrBufPc':          ( None, ),
    'cbInstrBufTotal':      ( None, ),
    'offCurInstrStart':     ( None, ),
    'cbOpcode':             ( None, ),
    'offOpcode':            ( None, ),
    'offModRm':             ( None, ),
    # Okay ones.
    'fPrefixes':            ( 'uint32_t', ),
    'uRexReg':              ( 'uint8_t', ),
    'uRexB':                ( 'uint8_t', ),
    'uRexIndex':            ( 'uint8_t', ),
    'iEffSeg':              ( 'uint8_t', ),
    'enmEffOpSize':         ( 'IEMMODE', ),
    'enmDefAddrMode':       ( 'IEMMODE', ),
    'enmEffAddrMode':       ( 'IEMMODE', ),
    'enmDefOpSize':         ( 'IEMMODE', ),
    'idxPrefix':            ( 'uint8_t', ),
    'uVex3rdReg':           ( 'uint8_t', ),
    'uVexLength':           ( 'uint8_t', ),
    'fEvexStuff':           ( 'uint8_t', ),
    'uFpuOpcode':           ( 'uint16_t', ),
};

## @name McStmtCond.oIfBranchAnnotation/McStmtCond.oElseBranchAnnotation values
## @{
g_ksFinishAnnotation_Advance        = 'Advance';
g_ksFinishAnnotation_RelJmp         = 'RelJmp';
g_ksFinishAnnotation_SetJmp         = 'SetJmp';
g_ksFinishAnnotation_RelCall        = 'RelCall';
g_ksFinishAnnotation_IndCall        = 'IndCall';
g_ksFinishAnnotation_DeferToCImpl   = 'DeferToCImpl';
## @}
140
141
class ThreadedParamRef(object):
    """
    A parameter reference for a threaded function.

    Records where (statement, parameter, offset) a value is referenced in the
    original code and how it gets packed into the threaded function call.
    """

    def __init__(self, sOrgRef, sType, oStmt, iParam = None, offParam = 0, sStdRef = None):
        ## The name / reference exactly as it appears in the original code.
        self.sOrgRef     = sOrgRef;
        ## Normalized name; strips all whitespace from sOrgRef unless an
        ## explicit standard reference was supplied by the caller.
        self.sStdRef     = sStdRef or ''.join(sOrgRef.split());
        ## Set when the caller supplied sStdRef, i.e. sOrgRef may not match the parameter.
        self.fCustomRef  = sStdRef is not None;
        ## The (typically derived) type of the referenced value.
        self.sType       = sType;
        ## The statement in which the reference was found.
        self.oStmt       = oStmt;
        ## Index of the statement parameter containing the reference; None if implicit.
        self.iParam      = iParam;
        ## Byte offset of the reference within that parameter.
        self.offParam    = offParam;

        ## The variable name used in the generated threaded function (placeholder until assigned).
        self.sNewName    = 'x';
        ## The threaded function parameter this reference is packed into (placeholder until assigned).
        self.iNewParam   = 99;
        ## The bit offset within iNewParam (placeholder until assigned).
        self.offNewParam = 1024
169
170
class ThreadedFunctionVariation(object):
    """ Threaded function variation. """

    ## @name Variations.
    ## These variations will match translation block selection/distinctions as well.
    ## @{
    # pylint: disable=line-too-long
    ksVariation_Default           = '';                  ##< No variations - only used by IEM_MC_DEFER_TO_CIMPL_X_RET.
    ksVariation_16                = '_16';               ##< 16-bit mode code (386+).
    ksVariation_16f               = '_16f';              ##< 16-bit mode code (386+), check+clear eflags.
    ksVariation_16_Jmp            = '_16_Jmp';           ##< 16-bit mode code (386+), conditional jump taken.
    ksVariation_16f_Jmp           = '_16f_Jmp';          ##< 16-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_16_NoJmp          = '_16_NoJmp';         ##< 16-bit mode code (386+), conditional jump not taken.
    ksVariation_16f_NoJmp         = '_16f_NoJmp';        ##< 16-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_16_Addr32         = '_16_Addr32';        ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing.
    ksVariation_16f_Addr32        = '_16f_Addr32';       ##< 16-bit mode code (386+), address size prefixed to 32-bit addressing, eflags.
    ksVariation_16_Pre386         = '_16_Pre386';        ##< 16-bit mode code, pre-386 CPU target.
    ksVariation_16f_Pre386        = '_16f_Pre386';       ##< 16-bit mode code, pre-386 CPU target, check+clear eflags.
    ksVariation_16_Pre386_Jmp     = '_16_Pre386_Jmp';    ##< 16-bit mode code, pre-386 CPU target, conditional jump taken.
    ksVariation_16f_Pre386_Jmp    = '_16f_Pre386_Jmp';   ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump taken.
    ksVariation_16_Pre386_NoJmp   = '_16_Pre386_NoJmp';  ##< 16-bit mode code, pre-386 CPU target, conditional jump not taken.
    ksVariation_16f_Pre386_NoJmp  = '_16f_Pre386_NoJmp'; ##< 16-bit mode code, pre-386 CPU target, check+clear eflags, conditional jump not taken.
    ksVariation_32                = '_32';               ##< 32-bit mode code (386+).
    ksVariation_32f               = '_32f';              ##< 32-bit mode code (386+), check+clear eflags.
    ksVariation_32_Jmp            = '_32_Jmp';           ##< 32-bit mode code (386+), conditional jump taken.
    ksVariation_32f_Jmp           = '_32f_Jmp';          ##< 32-bit mode code (386+), check+clear eflags, conditional jump taken.
    ksVariation_32_NoJmp          = '_32_NoJmp';         ##< 32-bit mode code (386+), conditional jump not taken.
    ksVariation_32f_NoJmp         = '_32f_NoJmp';        ##< 32-bit mode code (386+), check+clear eflags, conditional jump not taken.
    ksVariation_32_Flat           = '_32_Flat';          ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide.
    ksVariation_32f_Flat          = '_32f_Flat';         ##< 32-bit mode code (386+) with CS, DS, ES and SS flat and 4GB wide, eflags.
    ksVariation_32_Addr16         = '_32_Addr16';        ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing.
    ksVariation_32f_Addr16        = '_32f_Addr16';       ##< 32-bit mode code (386+), address size prefixed to 16-bit addressing, eflags.
    ksVariation_64                = '_64';               ##< 64-bit mode code.
    ksVariation_64f               = '_64f';              ##< 64-bit mode code, check+clear eflags.
    ksVariation_64_Jmp            = '_64_Jmp';           ##< 64-bit mode code, conditional jump taken.
    ksVariation_64f_Jmp           = '_64f_Jmp';          ##< 64-bit mode code, check+clear eflags, conditional jump taken.
    ksVariation_64_NoJmp          = '_64_NoJmp';         ##< 64-bit mode code, conditional jump not taken.
    ksVariation_64f_NoJmp         = '_64f_NoJmp';        ##< 64-bit mode code, check+clear eflags, conditional jump not taken.
    ksVariation_64_FsGs           = '_64_FsGs';          ##< 64-bit mode code, with memory accesses via FS or GS.
    ksVariation_64f_FsGs          = '_64f_FsGs';         ##< 64-bit mode code, with memory accesses via FS or GS, check+clear eflags.
    ksVariation_64_Addr32         = '_64_Addr32';        ##< 64-bit mode code, address size prefixed to 32-bit addressing.
    ksVariation_64f_Addr32        = '_64f_Addr32';       ##< 64-bit mode code, address size prefixed to 32-bit addressing, c+c eflags.
    # pylint: enable=line-too-long

    ## All variations.
    kasVariations = (
        ksVariation_Default,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for blocks without memory operands.
    kasVariationsWithoutAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Ditto, excluding the pre-386 targets.
    kasVariationsWithoutAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_64,
        ksVariation_64f,
    );
    ## Ditto, excluding pre-386 targets and 64-bit mode.
    kasVariationsWithoutAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Ditto, excluding 64-bit mode only.
    kasVariationsWithoutAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
    );
    ## Ditto, 64-bit mode only.
    kasVariationsWithoutAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
    );
    ## Variations for blocks with memory operands (effective address calculation).
    kasVariationsWithAddress = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Ditto, excluding the pre-386 targets.
    kasVariationsWithAddressNot286 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Ditto, excluding pre-386 targets and 64-bit mode.
    kasVariationsWithAddressNot286Not64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Ditto, excluding 64-bit mode only.
    kasVariationsWithAddressNot64 = (
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
    );
    ## Ditto, 64-bit mode only.
    kasVariationsWithAddressOnly64 = (
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
    ## Variations for pre-386 only code.
    kasVariationsOnlyPre386 = (
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
    );
    ## The order in which the variations are emitted (most frequent first).
    kasVariationsEmitOrder = (
        ksVariation_Default,
        ksVariation_64,
        ksVariation_64f,
        ksVariation_64_Jmp,
        ksVariation_64f_Jmp,
        ksVariation_64_NoJmp,
        ksVariation_64f_NoJmp,
        ksVariation_64_FsGs,
        ksVariation_64f_FsGs,
        ksVariation_32_Flat,
        ksVariation_32f_Flat,
        ksVariation_32,
        ksVariation_32f,
        ksVariation_32_Jmp,
        ksVariation_32f_Jmp,
        ksVariation_32_NoJmp,
        ksVariation_32f_NoJmp,
        ksVariation_16,
        ksVariation_16f,
        ksVariation_16_Jmp,
        ksVariation_16f_Jmp,
        ksVariation_16_NoJmp,
        ksVariation_16f_NoJmp,
        ksVariation_16_Addr32,
        ksVariation_16f_Addr32,
        ksVariation_16_Pre386,
        ksVariation_16f_Pre386,
        ksVariation_16_Pre386_Jmp,
        ksVariation_16f_Pre386_Jmp,
        ksVariation_16_Pre386_NoJmp,
        ksVariation_16f_Pre386_NoJmp,
        ksVariation_32_Addr16,
        ksVariation_32f_Addr16,
        ksVariation_64_Addr32,
        ksVariation_64f_Addr32,
    );
400 kdVariationNames = {
401 ksVariation_Default: 'defer-to-cimpl',
402 ksVariation_16: '16-bit',
403 ksVariation_16f: '16-bit w/ eflag checking and clearing',
404 ksVariation_16_Jmp: '16-bit w/ conditional jump taken',
405 ksVariation_16f_Jmp: '16-bit w/ eflag checking and clearing and conditional jump taken',
406 ksVariation_16_NoJmp: '16-bit w/ conditional jump not taken',
407 ksVariation_16f_NoJmp: '16-bit w/ eflag checking and clearing and conditional jump not taken',
408 ksVariation_16_Addr32: '16-bit w/ address prefix (Addr32)',
409 ksVariation_16f_Addr32: '16-bit w/ address prefix (Addr32) and eflag checking and clearing',
410 ksVariation_16_Pre386: '16-bit on pre-386 CPU',
411 ksVariation_16f_Pre386: '16-bit on pre-386 CPU w/ eflag checking and clearing',
412 ksVariation_16_Pre386_Jmp: '16-bit on pre-386 CPU w/ conditional jump taken',
413 ksVariation_16f_Pre386_Jmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
414 ksVariation_16_Pre386_NoJmp: '16-bit on pre-386 CPU w/ conditional jump taken',
415 ksVariation_16f_Pre386_NoJmp: '16-bit on pre-386 CPU w/ eflag checking and clearing and conditional jump taken',
416 ksVariation_32: '32-bit',
417 ksVariation_32f: '32-bit w/ eflag checking and clearing',
418 ksVariation_32_Jmp: '32-bit w/ conditional jump taken',
419 ksVariation_32f_Jmp: '32-bit w/ eflag checking and clearing and conditional jump taken',
420 ksVariation_32_NoJmp: '32-bit w/ conditional jump not taken',
421 ksVariation_32f_NoJmp: '32-bit w/ eflag checking and clearing and conditional jump not taken',
422 ksVariation_32_Flat: '32-bit flat and wide open CS, SS, DS and ES',
423 ksVariation_32f_Flat: '32-bit flat and wide open CS, SS, DS and ES w/ eflag checking and clearing',
424 ksVariation_32_Addr16: '32-bit w/ address prefix (Addr16)',
425 ksVariation_32f_Addr16: '32-bit w/ address prefix (Addr16) and eflag checking and clearing',
426 ksVariation_64: '64-bit',
427 ksVariation_64f: '64-bit w/ eflag checking and clearing',
428 ksVariation_64_Jmp: '64-bit w/ conditional jump taken',
429 ksVariation_64f_Jmp: '64-bit w/ eflag checking and clearing and conditional jump taken',
430 ksVariation_64_NoJmp: '64-bit w/ conditional jump not taken',
431 ksVariation_64f_NoJmp: '64-bit w/ eflag checking and clearing and conditional jump not taken',
432 ksVariation_64_FsGs: '64-bit with memory accessed via FS or GS',
433 ksVariation_64f_FsGs: '64-bit with memory accessed via FS or GS and eflag checking and clearing',
434 ksVariation_64_Addr32: '64-bit w/ address prefix (Addr32)',
435 ksVariation_64f_Addr32: '64-bit w/ address prefix (Addr32) and eflag checking and clearing',
436 };
    ## Variations that perform EFLAGS checking and clearing.
    kdVariationsWithEflagsCheckingAndClearing = {
        ksVariation_16f: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Addr32: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_32f_Flat: True,
        ksVariation_32f_Addr16: True,
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## 64-bit variations without eflags checking and clearing.
    kdVariationsOnly64NoFlags = {
        ksVariation_64: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_64_FsGs: True,
        ksVariation_64_Addr32: True,
    };
    ## 64-bit variations with eflags checking and clearing.
    kdVariationsOnly64WithFlags = {
        ksVariation_64f: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64f_Addr32: True,
    };
    ## Pre-386 variations without eflags checking and clearing.
    kdVariationsOnlyPre386NoFlags = {
        ksVariation_16_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
    };
    ## Pre-386 variations with eflags checking and clearing.
    kdVariationsOnlyPre386WithFlags = {
        ksVariation_16f_Pre386: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## Variations where general memory accesses can use flat (unsegmented) addressing.
    kdVariationsWithFlatAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses can use flat addressing.
    kdVariationsWithFlatStackAddress = {
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations where stack accesses can use flat 64-bit addressing.
    kdVariationsWithFlat64StackAddress = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations using 16-bit effective addressing.
    kdVariationsWithFlatAddr16 = {
        ksVariation_16: True,
        ksVariation_16f: True,
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_32_Addr16: True,
        ksVariation_32f_Addr16: True,
    };
    ## Variations using 32-bit effective addressing, excluding 64-bit mode.
    kdVariationsWithFlatAddr32No64 = {
        ksVariation_16_Addr32: True,
        ksVariation_16f_Addr32: True,
        ksVariation_32: True,
        ksVariation_32f: True,
        ksVariation_32_Flat: True,
        ksVariation_32f_Flat: True,
    };
    ## 64-bit variations with memory operands.
    kdVariationsWithAddressOnly64 = {
        ksVariation_64: True,
        ksVariation_64f: True,
        ksVariation_64_FsGs: True,
        ksVariation_64f_FsGs: True,
        ksVariation_64_Addr32: True,
        ksVariation_64f_Addr32: True,
    };
    ## Variations for conditional branch code (both taken and not-taken forms).
    kdVariationsWithConditional = {
        ksVariation_16_Jmp: True,
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_Jmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_Jmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_Jmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_Jmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_Jmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## Variations for the not-taken side of a conditional branch.
    kdVariationsWithConditionalNoJmp = {
        ksVariation_16_NoJmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_32_NoJmp: True,
        ksVariation_64_NoJmp: True,
        ksVariation_16f_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
        ksVariation_32f_NoJmp: True,
        ksVariation_64f_NoJmp: True,
    };
    ## All pre-386 variations.
    kdVariationsOnlyPre386 = {
        ksVariation_16_Pre386: True,
        ksVariation_16f_Pre386: True,
        ksVariation_16_Pre386_Jmp: True,
        ksVariation_16f_Pre386_Jmp: True,
        ksVariation_16_Pre386_NoJmp: True,
        ksVariation_16f_Pre386_NoJmp: True,
    };
    ## @}
567
    ## IEM_CIMPL_F_XXX flags that we know.
    ## The value indicates whether it terminates the TB or not. The goal is to
    ## improve the recompiler so all but END_TB will be False.
    ##
    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
    kdCImplFlags = {
        'IEM_CIMPL_F_MODE':                         False,
        'IEM_CIMPL_F_BRANCH_DIRECT':                False,
        'IEM_CIMPL_F_BRANCH_INDIRECT':              False,
        'IEM_CIMPL_F_BRANCH_RELATIVE':              False,
        'IEM_CIMPL_F_BRANCH_FAR':                   True,
        'IEM_CIMPL_F_BRANCH_CONDITIONAL':           False,
        # IEM_CIMPL_F_BRANCH_ANY should only be used for testing, so not included here.
        'IEM_CIMPL_F_BRANCH_STACK':                 False,
        'IEM_CIMPL_F_BRANCH_STACK_FAR':             False,
        'IEM_CIMPL_F_RFLAGS':                       False,
        'IEM_CIMPL_F_INHIBIT_SHADOW':               False,
        'IEM_CIMPL_F_CHECK_IRQ_AFTER':              False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE':             False,
        'IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER':   False, # (ignore)
        'IEM_CIMPL_F_STATUS_FLAGS':                 False,
        'IEM_CIMPL_F_VMEXIT':                       False,
        'IEM_CIMPL_F_FPU':                          False,
        'IEM_CIMPL_F_REP':                          False,
        'IEM_CIMPL_F_IO':                           False,
        'IEM_CIMPL_F_END_TB':                       True,
        'IEM_CIMPL_F_XCPT':                         True,
        'IEM_CIMPL_F_CALLS_CIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL':                  False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE':     False,
        'IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE':      False,
    };
600
601 def __init__(self, oThreadedFunction, sVariation = ksVariation_Default):
602 self.oParent = oThreadedFunction # type: ThreadedFunction
603 ##< ksVariation_Xxxx.
604 self.sVariation = sVariation
605
606 ## Threaded function parameter references.
607 self.aoParamRefs = [] # type: List[ThreadedParamRef]
608 ## Unique parameter references.
609 self.dParamRefs = {} # type: Dict[str, List[ThreadedParamRef]]
610 ## Minimum number of parameters to the threaded function.
611 self.cMinParams = 0;
612
613 ## List/tree of statements for the threaded function.
614 self.aoStmtsForThreadedFunction = [] # type: List[McStmt]
615
616 ## Function enum number, for verification. Set by generateThreadedFunctionsHeader.
617 self.iEnumValue = -1;
618
619 ## Native recompilation details for this variation.
620 self.oNativeRecomp = None;
621
622 def getIndexName(self):
623 sName = self.oParent.oMcBlock.sFunction;
624 if sName.startswith('iemOp_'):
625 sName = sName[len('iemOp_'):];
626 return 'kIemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
627
628 def getThreadedFunctionName(self):
629 sName = self.oParent.oMcBlock.sFunction;
630 if sName.startswith('iemOp_'):
631 sName = sName[len('iemOp_'):];
632 return 'iemThreadedFunc_%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
633
634 def getNativeFunctionName(self):
635 return 'iemNativeRecompFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
636
637 def getLivenessFunctionName(self):
638 return 'iemNativeLivenessFunc_' + self.getThreadedFunctionName()[len('iemThreadedFunc_'):];
639
640 def getShortName(self):
641 sName = self.oParent.oMcBlock.sFunction;
642 if sName.startswith('iemOp_'):
643 sName = sName[len('iemOp_'):];
644 return '%s%s%s' % ( sName, self.oParent.sSubName, self.sVariation, );
645
646 def getThreadedFunctionStatisticsName(self):
647 sName = self.oParent.oMcBlock.sFunction;
648 if sName.startswith('iemOp_'):
649 sName = sName[len('iemOp_'):];
650
651 sVarNm = self.sVariation;
652 if sVarNm:
653 if sVarNm.startswith('_'):
654 sVarNm = sVarNm[1:];
655 if sVarNm.endswith('_Jmp'):
656 sVarNm = sVarNm[:-4];
657 sName += '_Jmp';
658 elif sVarNm.endswith('_NoJmp'):
659 sVarNm = sVarNm[:-6];
660 sName += '_NoJmp';
661 else:
662 sVarNm = 'DeferToCImpl';
663
664 return '%s/%s%s' % ( sVarNm, sName, self.oParent.sSubName );
665
666 def isWithFlagsCheckingAndClearingVariation(self):
667 """
668 Checks if this is a variation that checks and clears EFLAGS.
669 """
670 return self.sVariation in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing;
671
672 #
673 # Analysis and code morphing.
674 #
675
676 def raiseProblem(self, sMessage):
677 """ Raises a problem. """
678 self.oParent.raiseProblem(sMessage);
679
680 def warning(self, sMessage):
681 """ Emits a warning. """
682 self.oParent.warning(sMessage);
683
684 def analyzeReferenceToType(self, sRef):
685 """
686 Translates a variable or structure reference to a type.
687 Returns type name.
688 Raises exception if unable to figure it out.
689 """
690 ch0 = sRef[0];
691 if ch0 == 'u':
692 if sRef.startswith('u32'):
693 return 'uint32_t';
694 if sRef.startswith('u8') or sRef == 'uReg':
695 return 'uint8_t';
696 if sRef.startswith('u64'):
697 return 'uint64_t';
698 if sRef.startswith('u16'):
699 return 'uint16_t';
700 elif ch0 == 'b':
701 return 'uint8_t';
702 elif ch0 == 'f':
703 return 'bool';
704 elif ch0 == 'i':
705 if sRef.startswith('i8'):
706 return 'int8_t';
707 if sRef.startswith('i16'):
708 return 'int16_t';
709 if sRef.startswith('i32'):
710 return 'int32_t';
711 if sRef.startswith('i64'):
712 return 'int64_t';
713 if sRef in ('iReg', 'iFixedReg', 'iGReg', 'iSegReg', 'iSrcReg', 'iDstReg', 'iCrReg'):
714 return 'uint8_t';
715 elif ch0 == 'p':
716 if sRef.find('-') < 0:
717 return 'uintptr_t';
718 if sRef.startswith('pVCpu->iem.s.'):
719 sField = sRef[len('pVCpu->iem.s.') : ];
720 if sField in g_kdIemFieldToType:
721 if g_kdIemFieldToType[sField][0]:
722 return g_kdIemFieldToType[sField][0];
723 elif ch0 == 'G' and sRef.startswith('GCPtr'):
724 return 'uint64_t';
725 elif ch0 == 'e':
726 if sRef == 'enmEffOpSize':
727 return 'IEMMODE';
728 elif ch0 == 'o':
729 if sRef.startswith('off32'):
730 return 'uint32_t';
731 elif sRef == 'cbFrame': # enter
732 return 'uint16_t';
733 elif sRef == 'cShift': ## @todo risky
734 return 'uint8_t';
735
736 self.raiseProblem('Unknown reference: %s' % (sRef,));
737 return None; # Shut up pylint 2.16.2.
738
    def analyzeCallToType(self, sFnRef):
        """
        Determines the type of an indirect function call.

        sFnRef is a pointer-style reference (hungarian 'p' prefix).  The type
        is resolved either from the decoder function's definition macro
        arguments / local variable declarations, or - for pImpl->pfnXxx
        members - derived from the pImpl table type.  Raises (via
        raiseProblem) when it cannot be determined.
        """
        assert sFnRef[0] == 'p';

        #
        # Simple?  (No '->' member access in the reference.)
        #
        if sFnRef.find('-') < 0:
            oDecoderFunction = self.oParent.oMcBlock.oFunction;

            # Try the argument list of the function definition macro invocation first;
            # the argument preceding the matching name is its type.
            iArg = 2;
            while iArg < len(oDecoderFunction.asDefArgs):
                if sFnRef == oDecoderFunction.asDefArgs[iArg]:
                    return oDecoderFunction.asDefArgs[iArg - 1];
                iArg += 1;

            # Then check out line that includes the word and looks like a variable declaration.
            oRe = re.compile(' +(P[A-Z0-9_]+|const +IEMOP[A-Z0-9_]+ *[*]) +(const |) *' + sFnRef + ' *(;|=)');
            for sLine in oDecoderFunction.asLines:
                oMatch = oRe.match(sLine);
                if oMatch:
                    if not oMatch.group(1).startswith('const'):
                        return oMatch.group(1);
                    # 'const IEMOPXXX *' declarations map to the 'PCIEMOPXXX' pointer typedef.
                    return 'PC' + oMatch.group(1)[len('const ') : -1].strip();
            # Note: falls through to the final raiseProblem below when nothing matched.

        #
        # Deal with the pImpl->pfnXxx:
        #
        elif sFnRef.startswith('pImpl->pfn'):
            sMember   = sFnRef[len('pImpl->') : ];
            sBaseType = self.analyzeCallToType('pImpl');     # Recursively resolve the table type.
            offBits   = sMember.rfind('U') + 1;              # Position after the 'U' starting the bit-width suffix.
            if sBaseType == 'PCIEMOPBINSIZES':          return 'PFNIEMAIMPLBINU'        + sMember[offBits:];
            if sBaseType == 'PCIEMOPBINTODOSIZES':      return 'PFNIEMAIMPLBINTODOU'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPUNARYSIZES':        return 'PFNIEMAIMPLUNARYU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTSIZES':        return 'PFNIEMAIMPLSHIFTU'      + sMember[offBits:];
            if sBaseType == 'PCIEMOPSHIFTDBLSIZES':     return 'PFNIEMAIMPLSHIFTDBLU'   + sMember[offBits:];
            if sBaseType == 'PCIEMOPMULDIVSIZES':       return 'PFNIEMAIMPLMULDIVU'     + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAF3':           return 'PFNIEMAIMPLMEDIAF3U'    + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2':        return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF2IMM8':    return 'PFNIEMAIMPLMEDIAOPTF2U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPMEDIAOPTF3':        return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:];
            if sBaseType == 'PCIEMOPMEDIAOPTF3IMM8':    return 'PFNIEMAIMPLMEDIAOPTF3U' + sMember[offBits:] + 'IMM8';
            if sBaseType == 'PCIEMOPBLENDOP':           return 'PFNIEMAIMPLAVXBLENDU'   + sMember[offBits:];

            self.raiseProblem('Unknown call reference: %s::%s (%s)' % (sBaseType, sMember, sFnRef,));

        self.raiseProblem('Unknown call reference: %s' % (sFnRef,));
        return None; # Shut up pylint 2.16.2.
791
792 def analyze8BitGRegStmt(self, oStmt):
793 """
794 Gets the 8-bit general purpose register access details of the given statement.
795 ASSUMES the statement is one accessing an 8-bit GREG.
796 """
797 idxReg = 0;
798 if ( oStmt.sName.find('_FETCH_') > 0
799 or oStmt.sName.find('_REF_') > 0
800 or oStmt.sName.find('_TO_LOCAL') > 0):
801 idxReg = 1;
802
803 sRegRef = oStmt.asParams[idxReg];
804 if sRegRef.startswith('IEM_GET_MODRM_RM') or sRegRef.startswith('IEM_GET_MODRM_REG'):
805 asBits = [sBit.strip() for sBit in sRegRef.replace('(', ',').replace(')', '').split(',')];
806 if len(asBits) != 3 or asBits[1] != 'pVCpu' or (asBits[0] != 'IEM_GET_MODRM_RM' and asBits[0] != 'IEM_GET_MODRM_REG'):
807 self.raiseProblem('Unexpected reference: %s (asBits=%s)' % (sRegRef, asBits));
808 sOrgExpr = asBits[0] + '_EX8(pVCpu, ' + asBits[2] + ')';
809 else:
810 sOrgExpr = '((%s) < 4 || (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX | IEM_OP_PRF_VEX)) ? (%s) : (%s) + 12)' \
811 % (sRegRef, sRegRef, sRegRef,);
812
813 if sRegRef.find('IEM_GET_MODRM_RM') >= 0: sStdRef = 'bRmRm8Ex';
814 elif sRegRef.find('IEM_GET_MODRM_REG') >= 0: sStdRef = 'bRmReg8Ex';
815 elif sRegRef == 'X86_GREG_xAX': sStdRef = 'bGregXAx8Ex';
816 elif sRegRef == 'X86_GREG_xCX': sStdRef = 'bGregXCx8Ex';
817 elif sRegRef == 'X86_GREG_xSP': sStdRef = 'bGregXSp8Ex';
818 elif sRegRef == 'iFixedReg': sStdRef = 'bFixedReg8Ex';
819 else:
820 self.warning('analyze8BitGRegStmt: sRegRef=%s -> bOther8Ex; %s %s; sOrgExpr=%s'
821 % (sRegRef, oStmt.sName, oStmt.asParams, sOrgExpr,));
822 sStdRef = 'bOther8Ex';
823
824 #print('analyze8BitGRegStmt: %s %s; sRegRef=%s\n -> idxReg=%s sOrgExpr=%s sStdRef=%s'
825 # % (oStmt.sName, oStmt.asParams, sRegRef, idxReg, sOrgExpr, sStdRef));
826 return (idxReg, sOrgExpr, sStdRef);
827
828
    ## Maps memory related MCs to info for FLAT conversion.
    ## This is used in 64-bit and flat 32-bit variants to skip the unnecessary
    ## segmentation checking for every memory access.  Only applied to access
    ## via ES, DS and SS.  FS, GS and CS gets the full segmentation treatment,
    ## the latter (CS) is just to keep things simple (we could safely fetch via
    ## it, but only in 64-bit mode could we safely write via it, IIRC).
    ##
    ## Each value is a pair:
    ##      [0] = index of the effective segment parameter that is dropped when
    ##            converting to the FLAT variant (-1 means no such parameter,
    ##            see the idxEffSeg check in analyzeMorphStmtForThreaded);
    ##      [1] = name of the FLAT MC to substitute for the regular one.
    kdMemMcToFlatInfo = {
        'IEM_MC_FETCH_MEM_U8':                            (  1, 'IEM_MC_FETCH_MEM_FLAT_U8' ),
        'IEM_MC_FETCH_MEM16_U8':                          (  1, 'IEM_MC_FETCH_MEM16_FLAT_U8' ),
        'IEM_MC_FETCH_MEM32_U8':                          (  1, 'IEM_MC_FETCH_MEM32_FLAT_U8' ),
        'IEM_MC_FETCH_MEM_U16':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_U16' ),
        'IEM_MC_FETCH_MEM_U16_DISP':                      (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_DISP' ),
        'IEM_MC_FETCH_MEM_I16':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_I16' ),
        'IEM_MC_FETCH_MEM_U32':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_U32' ),
        'IEM_MC_FETCH_MEM_U32_DISP':                      (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_DISP' ),
        'IEM_MC_FETCH_MEM_I32':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_I32' ),
        'IEM_MC_FETCH_MEM_U64':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_U64' ),
        'IEM_MC_FETCH_MEM_U64_DISP':                      (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_DISP' ),
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128':                (  1, 'IEM_MC_FETCH_MEM_FLAT_U64_ALIGN_U128' ),
        'IEM_MC_FETCH_MEM_I64':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_I64' ),
        'IEM_MC_FETCH_MEM_R32':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_R32' ),
        'IEM_MC_FETCH_MEM_R64':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_R64' ),
        'IEM_MC_FETCH_MEM_R80':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_R80' ),
        'IEM_MC_FETCH_MEM_D80':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_D80' ),
        'IEM_MC_FETCH_MEM_U128':                          (  1, 'IEM_MC_FETCH_MEM_FLAT_U128' ),
        'IEM_MC_FETCH_MEM_U128_NO_AC':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE':                (  1, 'IEM_MC_FETCH_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_NO_AC':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE':                 (  1, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE' ),
        'IEM_MC_FETCH_MEM_XMM_U32':                       (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32' ),
        'IEM_MC_FETCH_MEM_XMM_U64':                       (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64' ),
        'IEM_MC_FETCH_MEM_U256':                          (  1, 'IEM_MC_FETCH_MEM_FLAT_U256' ),
        'IEM_MC_FETCH_MEM_U256_NO_AC':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX':                (  1, 'IEM_MC_FETCH_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_YMM':                           (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM' ),
        'IEM_MC_FETCH_MEM_YMM_NO_AC':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_NO_AC' ),
        'IEM_MC_FETCH_MEM_YMM_ALIGN_AVX':                 (  1, 'IEM_MC_FETCH_MEM_FLAT_YMM_ALIGN_AVX' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U16':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U16' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U32':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U8_ZX_U64':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U32':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U32' ),
        'IEM_MC_FETCH_MEM_U16_ZX_U64':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U32_ZX_U64':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_ZX_U64' ),
        'IEM_MC_FETCH_MEM_U8_SX_U16':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U16' ),
        'IEM_MC_FETCH_MEM_U8_SX_U32':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U32' ),
        'IEM_MC_FETCH_MEM_U8_SX_U64':                     (  1, 'IEM_MC_FETCH_MEM_FLAT_U8_SX_U64' ),
        'IEM_MC_FETCH_MEM_U16_SX_U32':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U32' ),
        'IEM_MC_FETCH_MEM_U16_SX_U64':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U16_SX_U64' ),
        'IEM_MC_FETCH_MEM_U32_SX_U64':                    (  1, 'IEM_MC_FETCH_MEM_FLAT_U32_SX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128':            (  2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128' ),
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM':    (  2, 'IEM_MC_FETCH_MEM_FLAT_XMM_ALIGN_SSE_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM':          (  3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U32_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM':          (  3, 'IEM_MC_FETCH_MEM_FLAT_XMM_U64_AND_XREG_XMM' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_RAX_RDX_U64' ),
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64':
            ( 2, 'IEM_MC_FETCH_MEM_FLAT_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64' ),
        'IEM_MC_STORE_MEM_U8':                            (  0, 'IEM_MC_STORE_MEM_FLAT_U8' ),
        'IEM_MC_STORE_MEM_U16':                           (  0, 'IEM_MC_STORE_MEM_FLAT_U16' ),
        'IEM_MC_STORE_MEM_U32':                           (  0, 'IEM_MC_STORE_MEM_FLAT_U32' ),
        'IEM_MC_STORE_MEM_U64':                           (  0, 'IEM_MC_STORE_MEM_FLAT_U64' ),
        'IEM_MC_STORE_MEM_U8_CONST':                      (  0, 'IEM_MC_STORE_MEM_FLAT_U8_CONST' ),
        'IEM_MC_STORE_MEM_U16_CONST':                     (  0, 'IEM_MC_STORE_MEM_FLAT_U16_CONST' ),
        'IEM_MC_STORE_MEM_U32_CONST':                     (  0, 'IEM_MC_STORE_MEM_FLAT_U32_CONST' ),
        'IEM_MC_STORE_MEM_U64_CONST':                     (  0, 'IEM_MC_STORE_MEM_FLAT_U64_CONST' ),
        'IEM_MC_STORE_MEM_U128':                          (  0, 'IEM_MC_STORE_MEM_FLAT_U128' ),
        'IEM_MC_STORE_MEM_U128_NO_AC':                    (  0, 'IEM_MC_STORE_MEM_FLAT_U128_NO_AC' ),
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE':                (  0, 'IEM_MC_STORE_MEM_FLAT_U128_ALIGN_SSE' ),
        'IEM_MC_STORE_MEM_U256':                          (  0, 'IEM_MC_STORE_MEM_FLAT_U256' ),
        'IEM_MC_STORE_MEM_U256_NO_AC':                    (  0, 'IEM_MC_STORE_MEM_FLAT_U256_NO_AC' ),
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX':                (  0, 'IEM_MC_STORE_MEM_FLAT_U256_ALIGN_AVX' ),
        'IEM_MC_MEM_MAP_D80_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_D80_WO' ),
        'IEM_MC_MEM_MAP_I16_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_I16_WO' ),
        'IEM_MC_MEM_MAP_I32_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_I32_WO' ),
        'IEM_MC_MEM_MAP_I64_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_I64_WO' ),
        'IEM_MC_MEM_MAP_R32_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_R32_WO' ),
        'IEM_MC_MEM_MAP_R64_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_R64_WO' ),
        'IEM_MC_MEM_MAP_R80_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_R80_WO' ),
        'IEM_MC_MEM_MAP_U8_ATOMIC':                       (  2, 'IEM_MC_MEM_FLAT_MAP_U8_ATOMIC' ),
        'IEM_MC_MEM_MAP_U8_RW':                           (  2, 'IEM_MC_MEM_FLAT_MAP_U8_RW' ),
        'IEM_MC_MEM_MAP_U8_RO':                           (  2, 'IEM_MC_MEM_FLAT_MAP_U8_RO' ),
        'IEM_MC_MEM_MAP_U8_WO':                           (  2, 'IEM_MC_MEM_FLAT_MAP_U8_WO' ),
        'IEM_MC_MEM_MAP_U16_ATOMIC':                      (  2, 'IEM_MC_MEM_FLAT_MAP_U16_ATOMIC' ),
        'IEM_MC_MEM_MAP_U16_RW':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U16_RW' ),
        'IEM_MC_MEM_MAP_U16_RO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U16_RO' ),
        'IEM_MC_MEM_MAP_U16_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U16_WO' ),
        'IEM_MC_MEM_MAP_U32_ATOMIC':                      (  2, 'IEM_MC_MEM_FLAT_MAP_U32_ATOMIC' ),
        'IEM_MC_MEM_MAP_U32_RW':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U32_RW' ),
        'IEM_MC_MEM_MAP_U32_RO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U32_RO' ),
        'IEM_MC_MEM_MAP_U32_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U32_WO' ),
        'IEM_MC_MEM_MAP_U64_ATOMIC':                      (  2, 'IEM_MC_MEM_FLAT_MAP_U64_ATOMIC' ),
        'IEM_MC_MEM_MAP_U64_RW':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U64_RW' ),
        'IEM_MC_MEM_MAP_U64_RO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U64_RO' ),
        'IEM_MC_MEM_MAP_U64_WO':                          (  2, 'IEM_MC_MEM_FLAT_MAP_U64_WO' ),
        'IEM_MC_MEM_MAP_U128_ATOMIC':                     (  2, 'IEM_MC_MEM_FLAT_MAP_U128_ATOMIC' ),
        'IEM_MC_MEM_MAP_U128_RW':                         (  2, 'IEM_MC_MEM_FLAT_MAP_U128_RW' ),
        'IEM_MC_MEM_MAP_U128_RO':                         (  2, 'IEM_MC_MEM_FLAT_MAP_U128_RO' ),
        'IEM_MC_MEM_MAP_U128_WO':                         (  2, 'IEM_MC_MEM_FLAT_MAP_U128_WO' ),
        'IEM_MC_MEM_MAP_EX':                              (  3, 'IEM_MC_MEM_FLAT_MAP_EX' ),
    };
930
    ## Maps stack push/pop MCs to their flat variants.
    ## Each value is a pair indexed by int(variation uses a flat 64-bit stack),
    ## see the lookup in analyzeMorphStmtForThreaded:
    ##      [0] = MC used for flat 32-bit stack variations;
    ##      [1] = MC used for flat 64-bit stack variations.
    ## Some entries map back to the regular MC name, presumably because no
    ## dedicated flat variant exists for that combination — TODO confirm.
    kdMemMcToFlatInfoStack = {
        'IEM_MC_PUSH_U16':                        ( 'IEM_MC_FLAT32_PUSH_U16',      'IEM_MC_FLAT64_PUSH_U16', ),
        'IEM_MC_PUSH_U32':                        ( 'IEM_MC_FLAT32_PUSH_U32',      'IEM_MC_PUSH_U32', ),
        'IEM_MC_PUSH_U64':                        ( 'IEM_MC_PUSH_U64',             'IEM_MC_FLAT64_PUSH_U64', ),
        'IEM_MC_PUSH_U32_SREG':                   ( 'IEM_MC_FLAT32_PUSH_U32_SREG', 'IEM_MC_PUSH_U32_SREG' ),
        'IEM_MC_POP_GREG_U16':                    ( 'IEM_MC_FLAT32_POP_GREG_U16',  'IEM_MC_FLAT64_POP_GREG_U16', ),
        'IEM_MC_POP_GREG_U32':                    ( 'IEM_MC_FLAT32_POP_GREG_U32',  'IEM_MC_POP_GREG_U32', ),
        'IEM_MC_POP_GREG_U64':                    ( 'IEM_MC_POP_GREG_U64',         'IEM_MC_FLAT64_POP_GREG_U64', ),
    };
940
    ## Maps each variation to the IEM_MC_CALC_RM_EFF_ADDR_THREADED_* MC that
    ## IEM_MC_CALC_RM_EFF_ADDR is morphed into (selected by the variation's
    ## effective address size; the 64-bit flavors have FSGS/ADDR32 specials).
    kdThreadedCalcRmEffAddrMcByVariation = {
        ksVariation_16:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Pre386:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16f_Pre386:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32_Addr16:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_32f_Addr16:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16',
        ksVariation_16_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_16f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32_Flat:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_32f_Flat:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32',
        ksVariation_64:             'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64f:            'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64',
        ksVariation_64_FsGs:        'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64f_FsGs:       'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS',
        ksVariation_64_Addr32:      'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32', ## @todo How did this work again...
        ksVariation_64f_Addr32:     'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32',
    };
961
    def analyzeMorphStmtForThreaded(self, aoStmts, dState, iParamRef = 0, iLevel = 0):
        """
        Transforms (copy) the statements into those for the threaded function.

        Returns list/tree of statements (aoStmts is not modified) and the new
        iParamRef value.

        dState is a mutable dictionary shared by all recursion levels; its
        'IEM_MC_ASSERT_EFLAGS' entry records the iLevel at which an EFLAGS
        assertion MC was scheduled (set when seeing IEM_MC_REF_EFLAGS, consumed
        either before the next ..._FINISH statement or when the scheduling
        level goes out of scope).  iParamRef is the index of the next entry in
        self.aoParamRefs to consume, and iLevel is the conditional nesting
        depth (0 at the top level).
        """
        #
        # We'll be traversing aoParamRefs in parallel to the statements, so we
        # must match the traversal in analyzeFindThreadedParamRefs exactly.
        #
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoThreadedStmts = [];
        for oStmt in aoStmts:
            # Skip C++ statements that are purely related to decoding.
            if not oStmt.isCppStmt() or not oStmt.fDecode:
                # Copy the statement. Make a deep copy to make sure we've got our own
                # copies of all instance variables, even if a bit overkill at the moment.
                oNewStmt = copy.deepcopy(oStmt);
                aoThreadedStmts.append(oNewStmt);
                #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));

                # If the statement has parameter references, process the relevant parameters.
                # We grab the references relevant to this statement and apply them in reverse
                # order so earlier offsets in the same parameter string stay valid.
                if iParamRef < len(self.aoParamRefs) and self.aoParamRefs[iParamRef].oStmt == oStmt:
                    iParamRefFirst = iParamRef;
                    while True:
                        iParamRef += 1;
                        if iParamRef >= len(self.aoParamRefs) or self.aoParamRefs[iParamRef].oStmt != oStmt:
                            break;

                    #print('iParamRefFirst=%s iParamRef=%s' % (iParamRefFirst, iParamRef));
                    for iCurRef in range(iParamRef - 1, iParamRefFirst - 1, -1):
                        oCurRef = self.aoParamRefs[iCurRef];
                        if oCurRef.iParam is not None:
                            assert oCurRef.oStmt == oStmt;
                            #print('iCurRef=%s iParam=%s sOrgRef=%s' % (iCurRef, oCurRef.iParam, oCurRef.sOrgRef));
                            sSrcParam = oNewStmt.asParams[oCurRef.iParam];
                            assert (   sSrcParam[oCurRef.offParam : oCurRef.offParam + len(oCurRef.sOrgRef)] == oCurRef.sOrgRef
                                    or oCurRef.fCustomRef), \
                                   'offParam=%s sOrgRef=%s iParam=%s oStmt.sName=%s sSrcParam=%s<eos>' \
                                   % (oCurRef.offParam, oCurRef.sOrgRef, oCurRef.iParam, oStmt.sName, sSrcParam);
                            # Splice in the new (threaded function parameter) name for the original expression.
                            oNewStmt.asParams[oCurRef.iParam] = sSrcParam[0 : oCurRef.offParam] \
                                                              + oCurRef.sNewName \
                                                              + sSrcParam[oCurRef.offParam + len(oCurRef.sOrgRef) : ];

                # Morph IEM_MC_CALC_RM_EFF_ADDR into IEM_MC_CALC_RM_EFF_ADDR_THREADED ...
                if oNewStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
                    oNewStmt.sName = self.kdThreadedCalcRmEffAddrMcByVariation[self.sVariation];
                    assert len(oNewStmt.asParams) == 3;

                    if self.sVariation in self.kdVariationsWithFlatAddr16:
                        oNewStmt.asParams = [
                            oNewStmt.asParams[0], oNewStmt.asParams[1], self.dParamRefs['u16Disp'][0].sNewName,
                        ];
                    else:
                        sSibAndMore = self.dParamRefs['bSib'][0].sNewName; # Merge bSib and 2nd part of cbImmAndRspOffset.
                        if oStmt.asParams[2] not in ('0', '1', '2', '4'):
                            sSibAndMore = '(%s) | ((%s) & 0x0f00)' % (self.dParamRefs['bSib'][0].sNewName, oStmt.asParams[2]);

                        if self.sVariation in self.kdVariationsWithFlatAddr32No64:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], oNewStmt.asParams[1], sSibAndMore, self.dParamRefs['u32Disp'][0].sNewName,
                            ];
                        else:
                            oNewStmt.asParams = [
                                oNewStmt.asParams[0], self.dParamRefs['bRmEx'][0].sNewName, sSibAndMore,
                                self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                            ];
                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
                elif (   oNewStmt.sName
                      in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
                          'IEM_MC_REL_JMP_S8_AND_FINISH',  'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
                          'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH',
                          'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH', 'IEM_MC_REL_CALL_S64_AND_FINISH',
                          'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH', 'IEM_MC_IND_CALL_U64_AND_FINISH',
                          'IEM_MC_RETN_AND_FINISH',)):
                    # The SET_RIP flavors take an absolute target and thus don't need cbInstr.
                    if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
                                              'IEM_MC_SET_RIP_U64_AND_FINISH', ):
                        oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
                    if (    oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
                        and self.sVariation not in self.kdVariationsOnlyPre386):
                        oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                    oNewStmt.sName += '_THREADED';
                    # Pick the PC-width / flags-checking suffix matching the variation.
                    if self.sVariation in self.kdVariationsOnly64NoFlags:
                        oNewStmt.sName += '_PC64';
                    elif self.sVariation in self.kdVariationsOnly64WithFlags:
                        oNewStmt.sName += '_PC64_WITH_FLAGS';
                    elif self.sVariation in self.kdVariationsOnlyPre386NoFlags:
                        oNewStmt.sName += '_PC16';
                    elif self.sVariation in self.kdVariationsOnlyPre386WithFlags:
                        oNewStmt.sName += '_PC16_WITH_FLAGS';
                    elif self.sVariation not in self.kdVariationsWithEflagsCheckingAndClearing:
                        assert self.sVariation != self.ksVariation_Default;
                        oNewStmt.sName += '_PC32';
                    else:
                        oNewStmt.sName += '_PC32_WITH_FLAGS';

                    # This is making the wrong branch of conditionals break out of the TB.
                    if (oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                        'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH')):
                        sExitTbStatus = 'VINF_SUCCESS';
                        if self.sVariation in self.kdVariationsWithConditional:
                            if self.sVariation in self.kdVariationsWithConditionalNoJmp:
                                if oStmt.sName != 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                    sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                            elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                                sExitTbStatus = 'VINF_IEM_REEXEC_BREAK';
                        oNewStmt.asParams.append(sExitTbStatus);

                    # Insert an MC so we can assert the correctness of modified flags annotations on IEM_MC_REF_EFLAGS.
                    if 'IEM_MC_ASSERT_EFLAGS' in dState:
                        aoThreadedStmts.insert(len(aoThreadedStmts) - 1,
                                               iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
                        del dState['IEM_MC_ASSERT_EFLAGS'];

                # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
                elif oNewStmt.sName.startswith('IEM_MC_') and oNewStmt.sName.find('_GREG_U8') > 0:
                    (idxReg, _, sStdRef) = self.analyze8BitGRegStmt(oStmt); # Don't use oNewStmt as it has been modified!
                    oNewStmt.asParams[idxReg] = self.dParamRefs[sStdRef][0].sNewName;
                    oNewStmt.sName += '_THREADED';

                # ... and IEM_MC_CALL_CIMPL_[0-5] and IEM_MC_DEFER_TO_CIMPL_[0-5]_RET into *_THREADED ...
                elif oNewStmt.sName.startswith('IEM_MC_CALL_CIMPL_') or oNewStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_'):
                    # The threaded forms get cbInstr prepended, shifting the function and parameter indexes by one.
                    oNewStmt.sName += '_THREADED';
                    oNewStmt.idxFn += 1;
                    oNewStmt.idxParams += 1;
                    oNewStmt.asParams.insert(0, self.dParamRefs['cbInstr'][0].sNewName);

                # ... and in FLAT modes we must morph memory access into FLAT accesses ...
                elif (    self.sVariation in self.kdVariationsWithFlatAddress
                      and (   oNewStmt.sName.startswith('IEM_MC_FETCH_MEM')
                           or (oNewStmt.sName.startswith('IEM_MC_STORE_MEM_') and oNewStmt.sName.find('_BY_REF') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_MEM_MAP') )):
                    idxEffSeg = self.kdMemMcToFlatInfo[oNewStmt.sName][0];
                    if idxEffSeg != -1:
                        if (    oNewStmt.asParams[idxEffSeg].find('iEffSeg') < 0
                            and oNewStmt.asParams[idxEffSeg] not in ('X86_SREG_ES', ) ):
                            self.raiseProblem('Expected iEffSeg as param #%d to %s: %s'
                                              % (idxEffSeg + 1, oNewStmt.sName, oNewStmt.asParams[idxEffSeg],));
                        oNewStmt.asParams.pop(idxEffSeg);
                    oNewStmt.sName = self.kdMemMcToFlatInfo[oNewStmt.sName][1];

                # ... PUSH and POP also needs flat variants, but these differ a little.
                elif (    self.sVariation in self.kdVariationsWithFlatStackAddress
                      and (   (oNewStmt.sName.startswith('IEM_MC_PUSH') and oNewStmt.sName.find('_FPU') < 0)
                           or oNewStmt.sName.startswith('IEM_MC_POP'))):
                    oNewStmt.sName = self.kdMemMcToFlatInfoStack[oNewStmt.sName][int(self.sVariation in
                                                                                     self.kdVariationsWithFlat64StackAddress)];

                # Add EFLAGS usage annotations to relevant MCs.
                elif oNewStmt.sName in ('IEM_MC_COMMIT_EFLAGS', 'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS',
                                        'IEM_MC_FETCH_EFLAGS'):
                    oInstruction = self.oParent.oMcBlock.oInstruction;
                    oNewStmt.sName += '_EX';
                    oNewStmt.asParams.append(oInstruction.getTestedFlagsCStyle());   # Shall crash and burn if oInstruction is
                    oNewStmt.asParams.append(oInstruction.getModifiedFlagsCStyle()); # None.  Fix the IEM decoder code.

                    # For IEM_MC_REF_EFLAGS we need to emit an assertion MC before the ..._FINISH statement.
                    if oNewStmt.sName == 'IEM_MC_REF_EFLAGS_EX':
                        dState['IEM_MC_ASSERT_EFLAGS'] = iLevel;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoIfBranch, dState,
                                                                                    iParamRef, iLevel + 1);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, iParamRef) = self.analyzeMorphStmtForThreaded(oStmt.aoElseBranch,
                                                                                          dState, iParamRef, iLevel + 1);

        # Insert an MC so we can assert the correctness of modified flags annotations
        # on IEM_MC_REF_EFLAGS if it goes out of scope.
        if dState.get('IEM_MC_ASSERT_EFLAGS', -1) == iLevel:
            aoThreadedStmts.append(iai.McStmtAssertEFlags(self.oParent.oMcBlock.oInstruction));
            del dState['IEM_MC_ASSERT_EFLAGS'];

        return (aoThreadedStmts, iParamRef);
1139
1140
1141 def analyzeConsolidateThreadedParamRefs(self):
1142 """
1143 Consolidate threaded function parameter references into a dictionary
1144 with lists of the references to each variable/field.
1145 """
1146 # Gather unique parameters.
1147 self.dParamRefs = {};
1148 for oRef in self.aoParamRefs:
1149 if oRef.sStdRef not in self.dParamRefs:
1150 self.dParamRefs[oRef.sStdRef] = [oRef,];
1151 else:
1152 self.dParamRefs[oRef.sStdRef].append(oRef);
1153
1154 # Generate names for them for use in the threaded function.
1155 dParamNames = {};
1156 for sName, aoRefs in self.dParamRefs.items():
1157 # Morph the reference expression into a name.
1158 if sName.startswith('IEM_GET_MODRM_REG'): sName = 'bModRmRegP';
1159 elif sName.startswith('IEM_GET_MODRM_RM'): sName = 'bModRmRmP';
1160 elif sName.startswith('IEM_GET_MODRM_REG_8'): sName = 'bModRmReg8P';
1161 elif sName.startswith('IEM_GET_MODRM_RM_8'): sName = 'bModRmRm8P';
1162 elif sName.startswith('IEM_GET_EFFECTIVE_VVVV'): sName = 'bEffVvvvP';
1163 elif sName.startswith('IEM_GET_IMM8_REG'): sName = 'bImm8Reg';
1164 elif sName.find('.') >= 0 or sName.find('->') >= 0:
1165 sName = sName[max(sName.rfind('.'), sName.rfind('>')) + 1 : ] + 'P';
1166 else:
1167 sName += 'P';
1168
1169 # Ensure it's unique.
1170 if sName in dParamNames:
1171 for i in range(10):
1172 if sName + str(i) not in dParamNames:
1173 sName += str(i);
1174 break;
1175 dParamNames[sName] = True;
1176
1177 # Update all the references.
1178 for oRef in aoRefs:
1179 oRef.sNewName = sName;
1180
1181 # Organize them by size too for the purpose of optimize them.
1182 dBySize = {} # type: Dict[str, str]
1183 for sStdRef, aoRefs in self.dParamRefs.items():
1184 if aoRefs[0].sType[0] != 'P':
1185 cBits = g_kdTypeInfo[aoRefs[0].sType][0];
1186 assert(cBits <= 64);
1187 else:
1188 cBits = 64;
1189
1190 if cBits not in dBySize:
1191 dBySize[cBits] = [sStdRef,]
1192 else:
1193 dBySize[cBits].append(sStdRef);
1194
1195 # Pack the parameters as best as we can, starting with the largest ones
1196 # and ASSUMING a 64-bit parameter size.
1197 self.cMinParams = 0;
1198 offNewParam = 0;
1199 for cBits in sorted(dBySize.keys(), reverse = True):
1200 for sStdRef in dBySize[cBits]:
1201 if offNewParam == 0 or offNewParam + cBits > 64:
1202 self.cMinParams += 1;
1203 offNewParam = cBits;
1204 else:
1205 offNewParam += cBits;
1206 assert(offNewParam <= 64);
1207
1208 for oRef in self.dParamRefs[sStdRef]:
1209 oRef.iNewParam = self.cMinParams - 1;
1210 oRef.offNewParam = offNewParam - cBits;
1211
1212 # Currently there are a few that requires 4 parameters, list these so we can figure out why:
1213 if self.cMinParams >= 4:
1214 print('debug: cMinParams=%s cRawParams=%s - %s:%d'
1215 % (self.cMinParams, len(self.dParamRefs), self.oParent.oMcBlock.sSrcFile, self.oParent.oMcBlock.iBeginLine,));
1216
1217 return True;
1218
1219 ksHexDigits = '0123456789abcdefABCDEF';
1220
1221 def analyzeFindThreadedParamRefs(self, aoStmts): # pylint: disable=too-many-statements
1222 """
1223 Scans the statements for things that have to passed on to the threaded
1224 function (populates self.aoParamRefs).
1225 """
1226 for oStmt in aoStmts:
1227 # Some statements we can skip alltogether.
1228 if isinstance(oStmt, iai.McCppPreProc):
1229 continue;
1230 if oStmt.isCppStmt() and oStmt.fDecode:
1231 continue;
1232 if oStmt.sName in ('IEM_MC_BEGIN',):
1233 continue;
1234
1235 if isinstance(oStmt, iai.McStmtVar):
1236 if oStmt.sValue is None:
1237 continue;
1238 aiSkipParams = { 0: True, 1: True, 3: True };
1239 else:
1240 aiSkipParams = {};
1241
1242 # Several statements have implicit parameters and some have different parameters.
1243 if oStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH',
1244 'IEM_MC_REL_JMP_S32_AND_FINISH',
1245 'IEM_MC_REL_CALL_S16_AND_FINISH', 'IEM_MC_REL_CALL_S32_AND_FINISH',
1246 'IEM_MC_REL_CALL_S64_AND_FINISH',
1247 'IEM_MC_IND_CALL_U16_AND_FINISH', 'IEM_MC_IND_CALL_U32_AND_FINISH',
1248 'IEM_MC_IND_CALL_U64_AND_FINISH',
1249 'IEM_MC_RETN_AND_FINISH',
1250 'IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2', 'IEM_MC_CALL_CIMPL_3',
1251 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',
1252 'IEM_MC_DEFER_TO_CIMPL_0_RET', 'IEM_MC_DEFER_TO_CIMPL_1_RET', 'IEM_MC_DEFER_TO_CIMPL_2_RET',
1253 'IEM_MC_DEFER_TO_CIMPL_3_RET', 'IEM_MC_DEFER_TO_CIMPL_4_RET', 'IEM_MC_DEFER_TO_CIMPL_5_RET', ):
1254 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)', 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1255
1256 if ( oStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_RETN_AND_FINISH', )
1257 and self.sVariation not in self.kdVariationsOnlyPre386):
1258 self.aoParamRefs.append(ThreadedParamRef('pVCpu->iem.s.enmEffOpSize', 'IEMMODE', oStmt));
1259
1260 if oStmt.sName == 'IEM_MC_CALC_RM_EFF_ADDR':
1261 # This is being pretty presumptive about bRm always being the RM byte...
1262 assert len(oStmt.asParams) == 3;
1263 assert oStmt.asParams[1] == 'bRm';
1264
1265 if self.sVariation in self.kdVariationsWithFlatAddr16:
1266 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1267 self.aoParamRefs.append(ThreadedParamRef('(uint16_t)uEffAddrInfo' ,
1268 'uint16_t', oStmt, sStdRef = 'u16Disp'));
1269 elif self.sVariation in self.kdVariationsWithFlatAddr32No64:
1270 self.aoParamRefs.append(ThreadedParamRef('bRm', 'uint8_t', oStmt));
1271 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1272 'uint8_t', oStmt, sStdRef = 'bSib'));
1273 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1274 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1275 else:
1276 assert self.sVariation in self.kdVariationsWithAddressOnly64;
1277 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_MODRM_EX(pVCpu, bRm)',
1278 'uint8_t', oStmt, sStdRef = 'bRmEx'));
1279 self.aoParamRefs.append(ThreadedParamRef('(uint8_t)(uEffAddrInfo >> 32)',
1280 'uint8_t', oStmt, sStdRef = 'bSib'));
1281 self.aoParamRefs.append(ThreadedParamRef('(uint32_t)uEffAddrInfo',
1282 'uint32_t', oStmt, sStdRef = 'u32Disp'));
1283 self.aoParamRefs.append(ThreadedParamRef('IEM_GET_INSTR_LEN(pVCpu)',
1284 'uint4_t', oStmt, sStdRef = 'cbInstr'));
1285 aiSkipParams[1] = True; # Skip the bRm parameter as it is being replaced by bRmEx.
1286
1287 # 8-bit register accesses needs to have their index argument reworked to take REX into account.
1288 if oStmt.sName.startswith('IEM_MC_') and oStmt.sName.find('_GREG_U8') > 0:
1289 (idxReg, sOrgRef, sStdRef) = self.analyze8BitGRegStmt(oStmt);
1290 self.aoParamRefs.append(ThreadedParamRef(sOrgRef, 'uint8_t', oStmt, idxReg, sStdRef = sStdRef));
1291 aiSkipParams[idxReg] = True; # Skip the parameter below.
1292
1293 # If in flat mode variation, ignore the effective segment parameter to memory MCs.
1294 if ( self.sVariation in self.kdVariationsWithFlatAddress
1295 and oStmt.sName in self.kdMemMcToFlatInfo
1296 and self.kdMemMcToFlatInfo[oStmt.sName][0] != -1):
1297 aiSkipParams[self.kdMemMcToFlatInfo[oStmt.sName][0]] = True;
1298
1299 # Inspect the target of calls to see if we need to pass down a
1300 # function pointer or function table pointer for it to work.
1301 if isinstance(oStmt, iai.McStmtCall):
1302 if oStmt.sFn[0] == 'p':
1303 self.aoParamRefs.append(ThreadedParamRef(oStmt.sFn, self.analyzeCallToType(oStmt.sFn), oStmt, oStmt.idxFn));
1304 elif ( oStmt.sFn[0] != 'i'
1305 and not oStmt.sFn.startswith('RT_CONCAT3')
1306 and not oStmt.sFn.startswith('IEMTARGETCPU_EFL_BEHAVIOR_SELECT')
1307 and not oStmt.sFn.startswith('IEM_SELECT_HOST_OR_FALLBACK') ):
1308 self.raiseProblem('Bogus function name in %s: %s' % (oStmt.sName, oStmt.sFn,));
1309 aiSkipParams[oStmt.idxFn] = True;
1310
1311 # Skip the hint parameter (first) for IEM_MC_CALL_CIMPL_X.
1312 if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
1313 assert oStmt.idxFn == 2;
1314 aiSkipParams[0] = True;
1315
1316 # Skip the function parameter (first) for IEM_MC_NATIVE_EMIT_X.
1317 if oStmt.sName.startswith('IEM_MC_NATIVE_EMIT_'):
1318 aiSkipParams[0] = True;
1319
1320
1321 # Check all the parameters for bogus references.
1322 for iParam, sParam in enumerate(oStmt.asParams):
1323 if iParam not in aiSkipParams and sParam not in self.oParent.dVariables:
1324 # The parameter may contain a C expression, so we have to try
1325 # extract the relevant bits, i.e. variables and fields while
1326 # ignoring operators and parentheses.
1327 offParam = 0;
1328 while offParam < len(sParam):
1329 # Is it the start of an C identifier? If so, find the end, but don't stop on field separators (->, .).
1330 ch = sParam[offParam];
1331 if ch.isalpha() or ch == '_':
1332 offStart = offParam;
1333 offParam += 1;
1334 while offParam < len(sParam):
1335 ch = sParam[offParam];
1336 if not ch.isalnum() and ch != '_' and ch != '.':
1337 if ch != '-' or sParam[offParam + 1] != '>':
1338 # Special hack for the 'CTX_SUFF(pVM)' bit in pVCpu->CTX_SUFF(pVM)->xxxx:
1339 if ( ch == '('
1340 and sParam[offStart : offParam + len('(pVM)->')] == 'pVCpu->CTX_SUFF(pVM)->'):
1341 offParam += len('(pVM)->') - 1;
1342 else:
1343 break;
1344 offParam += 1;
1345 offParam += 1;
1346 sRef = sParam[offStart : offParam];
1347
1348 # For register references, we pass the full register indexes instead as macros
1349 # like IEM_GET_MODRM_REG implicitly references pVCpu->iem.s.uRexReg and the
1350 # threaded function will be more efficient if we just pass the register index
1351 # as a 4-bit param.
1352 if ( sRef.startswith('IEM_GET_MODRM')
1353 or sRef.startswith('IEM_GET_EFFECTIVE_VVVV')
1354 or sRef.startswith('IEM_GET_IMM8_REG') ):
1355 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1356 if sParam[offParam] != '(':
1357 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1358 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1359 if asMacroParams is None:
1360 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1361 offParam = offCloseParam + 1;
1362 self.aoParamRefs.append(ThreadedParamRef(sParam[offStart : offParam], 'uint8_t',
1363 oStmt, iParam, offStart));
1364
1365 # We can skip known variables.
1366 elif sRef in self.oParent.dVariables:
1367 pass;
1368
1369 # Skip certain macro invocations.
1370 elif sRef in ('IEM_GET_HOST_CPU_FEATURES',
1371 'IEM_GET_GUEST_CPU_FEATURES',
1372 'IEM_IS_GUEST_CPU_AMD',
1373 'IEM_IS_16BIT_CODE',
1374 'IEM_IS_32BIT_CODE',
1375 'IEM_IS_64BIT_CODE',
1376 ):
1377 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1378 if sParam[offParam] != '(':
1379 self.raiseProblem('Expected "(" following %s in "%s"' % (sRef, oStmt.renderCode(),));
1380 (asMacroParams, offCloseParam) = iai.McBlock.extractParams(sParam, offParam);
1381 if asMacroParams is None:
1382 self.raiseProblem('Unable to find ")" for %s in "%s"' % (sRef, oStmt.renderCode(),));
1383 offParam = offCloseParam + 1;
1384
1385 # Skip any dereference following it, unless it's a predicate like IEM_IS_GUEST_CPU_AMD.
1386 if sRef not in ('IEM_IS_GUEST_CPU_AMD',
1387 'IEM_IS_16BIT_CODE',
1388 'IEM_IS_32BIT_CODE',
1389 'IEM_IS_64BIT_CODE',
1390 ):
1391 offParam = iai.McBlock.skipSpacesAt(sParam, offParam, len(sParam));
1392 if offParam + 2 <= len(sParam) and sParam[offParam : offParam + 2] == '->':
1393 offParam = iai.McBlock.skipSpacesAt(sParam, offParam + 2, len(sParam));
1394 while offParam < len(sParam) and (sParam[offParam].isalnum() or sParam[offParam] in '_.'):
1395 offParam += 1;
1396
1397 # Skip constants, globals, types (casts), sizeof and macros.
1398 elif ( sRef.startswith('IEM_OP_PRF_')
1399 or sRef.startswith('IEM_ACCESS_')
1400 or sRef.startswith('IEMINT_')
1401 or sRef.startswith('X86_GREG_')
1402 or sRef.startswith('X86_SREG_')
1403 or sRef.startswith('X86_EFL_')
1404 or sRef.startswith('X86_FSW_')
1405 or sRef.startswith('X86_FCW_')
1406 or sRef.startswith('X86_XCPT_')
1407 or sRef.startswith('IEMMODE_')
1408 or sRef.startswith('IEM_F_')
1409 or sRef.startswith('IEM_CIMPL_F_')
1410 or sRef.startswith('g_')
1411 or sRef.startswith('iemAImpl_')
1412 or sRef.startswith('kIemNativeGstReg_')
1413 or sRef.startswith('RT_ARCH_VAL_')
1414 or sRef in ( 'int8_t', 'int16_t', 'int32_t', 'int64_t',
1415 'INT8_C', 'INT16_C', 'INT32_C', 'INT64_C',
1416 'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t',
1417 'UINT8_C', 'UINT16_C', 'UINT32_C', 'UINT64_C',
1418 'UINT8_MAX', 'UINT16_MAX', 'UINT32_MAX', 'UINT64_MAX',
1419 'INT8_MAX', 'INT16_MAX', 'INT32_MAX', 'INT64_MAX',
1420 'INT8_MIN', 'INT16_MIN', 'INT32_MIN', 'INT64_MIN',
1421 'sizeof', 'NOREF', 'RT_NOREF', 'IEMMODE_64BIT',
1422 'RT_BIT_32', 'RT_BIT_64', 'true', 'false',
1423 'NIL_RTGCPTR',) ):
1424 pass;
1425
1426 # Skip certain macro invocations.
1427 # Any variable (non-field) and decoder fields in IEMCPU will need to be parameterized.
1428 elif ( ( '.' not in sRef
1429 and '-' not in sRef
1430 and sRef not in ('pVCpu', ) )
1431 or iai.McBlock.koReIemDecoderVars.search(sRef) is not None):
1432 self.aoParamRefs.append(ThreadedParamRef(sRef, self.analyzeReferenceToType(sRef),
1433 oStmt, iParam, offStart));
1434 # Number.
1435 elif ch.isdigit():
1436 if ( ch == '0'
1437 and offParam + 2 <= len(sParam)
1438 and sParam[offParam + 1] in 'xX'
1439 and sParam[offParam + 2] in self.ksHexDigits ):
1440 offParam += 2;
1441 while offParam < len(sParam) and sParam[offParam] in self.ksHexDigits:
1442 offParam += 1;
1443 else:
1444 while offParam < len(sParam) and sParam[offParam].isdigit():
1445 offParam += 1;
1446 # Comment?
1447 elif ( ch == '/'
1448 and offParam + 4 <= len(sParam)
1449 and sParam[offParam + 1] == '*'):
1450 offParam += 2;
1451 offNext = sParam.find('*/', offParam);
1452 if offNext < offParam:
1453 self.raiseProblem('Unable to find "*/" in "%s" ("%s")' % (sRef, oStmt.renderCode(),));
1454 offParam = offNext + 2;
1455 # Whatever else.
1456 else:
1457 offParam += 1;
1458
1459 # Traverse the branches of conditionals.
1460 if isinstance(oStmt, iai.McStmtCond):
1461 self.analyzeFindThreadedParamRefs(oStmt.aoIfBranch);
1462 self.analyzeFindThreadedParamRefs(oStmt.aoElseBranch);
1463 return True;
1464
1465 def analyzeVariation(self, aoStmts):
1466 """
1467 2nd part of the analysis, done on each variation.
1468
1469 The variations may differ in parameter requirements and will end up with
1470 slightly different MC sequences. Thus this is done on each individually.
1471
1472 Returns dummy True - raises exception on trouble.
1473 """
1474 # Now scan the code for variables and field references that needs to
1475 # be passed to the threaded function because they are related to the
1476 # instruction decoding.
1477 self.analyzeFindThreadedParamRefs(aoStmts);
1478 self.analyzeConsolidateThreadedParamRefs();
1479
1480 # Morph the statement stream for the block into what we'll be using in the threaded function.
1481 (self.aoStmtsForThreadedFunction, iParamRef) = self.analyzeMorphStmtForThreaded(aoStmts, {});
1482 if iParamRef != len(self.aoParamRefs):
1483 raise Exception('iParamRef=%s, expected %s!' % (iParamRef, len(self.aoParamRefs),));
1484
1485 return True;
1486
    def emitThreadedCallStmtsForVariant(self, cchIndent, fTbLookupTable = False, sCallVarNm = None):
        """
        Produces generic C++ statements that emit a call to the threaded function
        variation and any subsequent checks that may be necessary after that.

        The sCallVarNm is the name of the variable with the threaded function
        to call.  This is for the case where all the variations have the same
        parameters and only the threaded function number differs.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.

        Returns a list of statement objects (iai.McCppCall / iai.McCppGeneric).
        """
        aoStmts = [
            iai.McCppCall('IEM_MC2_BEGIN_EMIT_CALLS',
                          ['1' if 'IEM_CIMPL_F_CHECK_IRQ_BEFORE' in self.oParent.dsCImplFlags else '0'],
                          cchIndent = cchIndent), # Scope and a hook for various stuff.
        ];

        # The call to the threaded function.
        asCallArgs = [ self.getIndexName() if not sCallVarNm else sCallVarNm, ];
        for iParam in range(self.cMinParams):
            # Assemble each 64-bit parameter by OR'ing together the references
            # packed into this slot (offNewParam is the bit offset within it).
            asFrags = [];
            for aoRefs in self.dParamRefs.values():
                oRef = aoRefs[0];
                if oRef.iNewParam == iParam:
                    sCast = '(uint64_t)'
                    if oRef.sType in ('int8_t', 'int16_t', 'int32_t'): # Make sure these don't get sign-extended.
                        sCast = '(uint64_t)(u' + oRef.sType + ')';
                    if oRef.offNewParam == 0:
                        asFrags.append(sCast + '(' + oRef.sOrgRef + ')');
                    else:
                        asFrags.append('(%s(%s) << %s)' % (sCast, oRef.sOrgRef, oRef.offNewParam));
            assert asFrags;
            asCallArgs.append(' | '.join(asFrags));

        # Emit a plain call, or one with a TB lookup table when requested.
        if fTbLookupTable is False:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_%s' % (len(asCallArgs) - 1,),
                                         asCallArgs, cchIndent = cchIndent));
        else:
            aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_%s' % (len(asCallArgs) - 1,),
                                         ['0' if fTbLookupTable is True else '1',] + asCallArgs, cchIndent = cchIndent));

        # 2023-11-28: This has to be done AFTER the CIMPL call, so we have to
        #             emit this mode check from the compilation loop.  On the
        #             plus side, this means we eliminate unnecessary call at
        #             end of the TB. :-)
        ## For CIMPL stuff, we need to consult the associated IEM_CIMPL_F_XXX
        ## mask and maybe emit additional checks.
        #if (   'IEM_CIMPL_F_MODE'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_XCPT'   in self.oParent.dsCImplFlags
        #    or 'IEM_CIMPL_F_VMEXIT' in self.oParent.dsCImplFlags):
        #    aoStmts.append(iai.McCppCall('IEM_MC2_EMIT_CALL_1', ( 'kIemThreadedFunc_BltIn_CheckMode', 'pVCpu->iem.s.fExec', ),
        #                                 cchIndent = cchIndent));

        sCImplFlags = ' | '.join(self.oParent.dsCImplFlags.keys());
        if not sCImplFlags:
            sCImplFlags = '0'
        aoStmts.append(iai.McCppCall('IEM_MC2_END_EMIT_CALLS', ( sCImplFlags, ), cchIndent = cchIndent)); # For closing the scope.

        # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
        # indicates we should do so.
        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
        asEndTbFlags = [];
        asTbBranchedFlags = [];
        for sFlag in self.oParent.dsCImplFlags:
            if self.kdCImplFlags[sFlag] is True:
                asEndTbFlags.append(sFlag);
            elif sFlag.startswith('IEM_CIMPL_F_BRANCH_'):
                asTbBranchedFlags.append(sFlag);
        # Conditional-without-jump variations skip the branched notification.
        if (    asTbBranchedFlags
            and (   'IEM_CIMPL_F_BRANCH_CONDITIONAL' not in asTbBranchedFlags
                 or self.sVariation not in self.kdVariationsWithConditionalNoJmp)):
            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
        if asEndTbFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
                                            cchIndent = cchIndent));

        if 'IEM_CIMPL_F_CHECK_IRQ_AFTER' in self.oParent.dsCImplFlags:
            aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.cInstrTillIrqCheck = 0;', cchIndent = cchIndent));

        return aoStmts;
1570
1571
1572class ThreadedFunction(object):
1573 """
1574 A threaded function.
1575 """
1576
1577 def __init__(self, oMcBlock: iai.McBlock) -> None:
1578 self.oMcBlock = oMcBlock # type: iai.McBlock
1579 # The remaining fields are only useful after analyze() has been called:
1580 ## Variations for this block. There is at least one.
1581 self.aoVariations = [] # type: List[ThreadedFunctionVariation]
1582 ## Variation dictionary containing the same as aoVariations.
1583 self.dVariations = {} # type: Dict[str, ThreadedFunctionVariation]
1584 ## Dictionary of local variables (IEM_MC_LOCAL[_CONST]) and call arguments (IEM_MC_ARG*).
1585 self.dVariables = {} # type: Dict[str, iai.McStmtVar]
1586 ## Dictionary with any IEM_CIMPL_F_XXX flags explicitly advertised in the code block
1587 ## and those determined by analyzeCodeOperation().
1588 self.dsCImplFlags = {} # type: Dict[str, bool]
1589 ## The unique sub-name for this threaded function.
1590 self.sSubName = '';
1591 #if oMcBlock.iInFunction > 0 or (oMcBlock.oInstruction and len(oMcBlock.oInstruction.aoMcBlocks) > 1):
1592 # self.sSubName = '_%s' % (oMcBlock.iInFunction);
1593
1594 @staticmethod
1595 def dummyInstance():
1596 """ Gets a dummy instance. """
1597 return ThreadedFunction(iai.McBlock('null', 999999999, 999999999,
1598 iai.DecoderFunction('null', 999999999, 'nil', ('','')), 999999999));
1599
1600 def hasWithFlagsCheckingAndClearingVariation(self):
1601 """
1602 Check if there is one or more with flags checking and clearing
1603 variations for this threaded function.
1604 """
1605 for sVarWithFlags in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
1606 if sVarWithFlags in self.dVariations:
1607 return True;
1608 return False;
1609
1610 #
1611 # Analysis and code morphing.
1612 #
1613
1614 def raiseProblem(self, sMessage):
1615 """ Raises a problem. """
1616 raise Exception('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1617
1618 def error(self, sMessage, oGenerator):
1619 """ Emits an error via the generator object, causing it to fail. """
1620 oGenerator.rawError('%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1621
1622 def warning(self, sMessage):
1623 """ Emits a warning. """
1624 print('%s:%s: warning: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sMessage, ));
1625
    ## Used by analyzeAndAnnotateName for memory MC blocks.
    ## Maps memory access MC statement names to an operand-size name suffix;
    ## the sort order of the keys (STORE > MEM_MAP > FETCH) decides priority.
    kdAnnotateNameMemStmts = {
        'IEM_MC_FETCH_MEM16_U8': '__mem8',
        'IEM_MC_FETCH_MEM32_U8': '__mem8',
        'IEM_MC_FETCH_MEM_D80': '__mem80',
        'IEM_MC_FETCH_MEM_I16': '__mem16',
        'IEM_MC_FETCH_MEM_I32': '__mem32',
        'IEM_MC_FETCH_MEM_I64': '__mem64',
        'IEM_MC_FETCH_MEM_R32': '__mem32',
        'IEM_MC_FETCH_MEM_R64': '__mem64',
        'IEM_MC_FETCH_MEM_R80': '__mem80',
        'IEM_MC_FETCH_MEM_U128': '__mem128',
        'IEM_MC_FETCH_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_U16': '__mem16',
        'IEM_MC_FETCH_MEM_U16_DISP': '__mem16',
        'IEM_MC_FETCH_MEM_U16_SX_U32': '__mem16sx32',
        'IEM_MC_FETCH_MEM_U16_SX_U64': '__mem16sx64',
        'IEM_MC_FETCH_MEM_U16_ZX_U32': '__mem16zx32',
        'IEM_MC_FETCH_MEM_U16_ZX_U64': '__mem16zx64',
        'IEM_MC_FETCH_MEM_U256': '__mem256',
        'IEM_MC_FETCH_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_FETCH_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_FETCH_MEM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_U32_DISP': '__mem32',
        'IEM_MC_FETCH_MEM_U32_SX_U64': '__mem32sx64',
        'IEM_MC_FETCH_MEM_U32_ZX_U64': '__mem32zx64',
        'IEM_MC_FETCH_MEM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U64_ALIGN_U128': '__mem64',
        'IEM_MC_FETCH_MEM_U64_DISP': '__mem64',
        'IEM_MC_FETCH_MEM_U8': '__mem8',
        'IEM_MC_FETCH_MEM_U8_DISP': '__mem8',
        'IEM_MC_FETCH_MEM_U8_SX_U16': '__mem8sx16',
        'IEM_MC_FETCH_MEM_U8_SX_U32': '__mem8sx32',
        'IEM_MC_FETCH_MEM_U8_SX_U64': '__mem8sx64',
        'IEM_MC_FETCH_MEM_U8_ZX_U16': '__mem8zx16',
        'IEM_MC_FETCH_MEM_U8_ZX_U32': '__mem8zx32',
        'IEM_MC_FETCH_MEM_U8_ZX_U64': '__mem8zx64',
        'IEM_MC_FETCH_MEM_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_NO_AC': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_ALIGN_SSE_AND_XREG_XMM': '__mem128',
        'IEM_MC_FETCH_MEM_XMM_U32_AND_XREG_XMM': '__mem32',
        'IEM_MC_FETCH_MEM_XMM_U64_AND_XREG_XMM': '__mem64',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_RAX_RDX_U64': '__mem128',
        'IEM_MC_FETCH_MEM_U128_AND_XREG_U128_AND_EAX_EDX_U32_SX_U64': '__mem128',

        'IEM_MC_STORE_MEM_I16_CONST_BY_REF': '__mem16',
        'IEM_MC_STORE_MEM_I32_CONST_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_I64_CONST_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_I8_CONST_BY_REF': '__mem8',
        'IEM_MC_STORE_MEM_INDEF_D80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF': '__mem32',
        'IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF': '__mem64',
        'IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF': '__mem80',
        'IEM_MC_STORE_MEM_U128': '__mem128',
        'IEM_MC_STORE_MEM_U128_ALIGN_SSE': '__mem128',
        'IEM_MC_STORE_MEM_U128_NO_AC': '__mem128',
        'IEM_MC_STORE_MEM_U16': '__mem16',
        'IEM_MC_STORE_MEM_U16_CONST': '__mem16c',
        'IEM_MC_STORE_MEM_U256': '__mem256',
        'IEM_MC_STORE_MEM_U256_ALIGN_AVX': '__mem256',
        'IEM_MC_STORE_MEM_U256_NO_AC': '__mem256',
        'IEM_MC_STORE_MEM_U32': '__mem32',
        'IEM_MC_STORE_MEM_U32_CONST': '__mem32c',
        'IEM_MC_STORE_MEM_U64': '__mem64',
        'IEM_MC_STORE_MEM_U64_CONST': '__mem64c',
        'IEM_MC_STORE_MEM_U8': '__mem8',
        'IEM_MC_STORE_MEM_U8_CONST': '__mem8c',

        'IEM_MC_MEM_MAP_D80_WO': '__mem80',
        'IEM_MC_MEM_MAP_I16_WO': '__mem16',
        'IEM_MC_MEM_MAP_I32_WO': '__mem32',
        'IEM_MC_MEM_MAP_I64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R32_WO': '__mem32',
        'IEM_MC_MEM_MAP_R64_WO': '__mem64',
        'IEM_MC_MEM_MAP_R80_WO': '__mem80',
        'IEM_MC_MEM_MAP_U128_ATOMIC': '__mem128a',
        'IEM_MC_MEM_MAP_U128_RO': '__mem128',
        'IEM_MC_MEM_MAP_U128_RW': '__mem128',
        'IEM_MC_MEM_MAP_U128_WO': '__mem128',
        'IEM_MC_MEM_MAP_U16_ATOMIC': '__mem16a',
        'IEM_MC_MEM_MAP_U16_RO': '__mem16',
        'IEM_MC_MEM_MAP_U16_RW': '__mem16',
        'IEM_MC_MEM_MAP_U16_WO': '__mem16',
        'IEM_MC_MEM_MAP_U32_ATOMIC': '__mem32a',
        'IEM_MC_MEM_MAP_U32_RO': '__mem32',
        'IEM_MC_MEM_MAP_U32_RW': '__mem32',
        'IEM_MC_MEM_MAP_U32_WO': '__mem32',
        'IEM_MC_MEM_MAP_U64_ATOMIC': '__mem64a',
        'IEM_MC_MEM_MAP_U64_RO': '__mem64',
        'IEM_MC_MEM_MAP_U64_RW': '__mem64',
        'IEM_MC_MEM_MAP_U64_WO': '__mem64',
        'IEM_MC_MEM_MAP_U8_ATOMIC': '__mem8a',
        'IEM_MC_MEM_MAP_U8_RO': '__mem8',
        'IEM_MC_MEM_MAP_U8_RW': '__mem8',
        'IEM_MC_MEM_MAP_U8_WO': '__mem8',
    };
    ## Used by analyzeAndAnnotateName for non-memory MC blocks.
    ## Maps register access MC statement names to a register-kind/size name suffix.
    kdAnnotateNameRegStmts = {
        'IEM_MC_FETCH_GREG_U8': '__greg8',
        'IEM_MC_FETCH_GREG_U8_ZX_U16': '__greg8zx16',
        'IEM_MC_FETCH_GREG_U8_ZX_U32': '__greg8zx32',
        'IEM_MC_FETCH_GREG_U8_ZX_U64': '__greg8zx64',
        'IEM_MC_FETCH_GREG_U8_SX_U16': '__greg8sx16',
        'IEM_MC_FETCH_GREG_U8_SX_U32': '__greg8sx32',
        'IEM_MC_FETCH_GREG_U8_SX_U64': '__greg8sx64',
        'IEM_MC_FETCH_GREG_U16': '__greg16',
        'IEM_MC_FETCH_GREG_U16_ZX_U32': '__greg16zx32',
        'IEM_MC_FETCH_GREG_U16_ZX_U64': '__greg16zx64',
        'IEM_MC_FETCH_GREG_U16_SX_U32': '__greg16sx32',
        'IEM_MC_FETCH_GREG_U16_SX_U64': '__greg16sx64',
        'IEM_MC_FETCH_GREG_U32': '__greg32',
        'IEM_MC_FETCH_GREG_U32_ZX_U64': '__greg32zx64',
        'IEM_MC_FETCH_GREG_U32_SX_U64': '__greg32sx64',
        'IEM_MC_FETCH_GREG_U64': '__greg64',
        'IEM_MC_FETCH_GREG_U64_ZX_U64': '__greg64zx64',
        'IEM_MC_FETCH_GREG_PAIR_U32': '__greg32',
        'IEM_MC_FETCH_GREG_PAIR_U64': '__greg64',

        'IEM_MC_STORE_GREG_U8': '__greg8',
        'IEM_MC_STORE_GREG_U16': '__greg16',
        'IEM_MC_STORE_GREG_U32': '__greg32',
        'IEM_MC_STORE_GREG_U64': '__greg64',
        'IEM_MC_STORE_GREG_I64': '__greg64',
        'IEM_MC_STORE_GREG_U8_CONST': '__greg8c',
        'IEM_MC_STORE_GREG_U16_CONST': '__greg16c',
        'IEM_MC_STORE_GREG_U32_CONST': '__greg32c',
        'IEM_MC_STORE_GREG_U64_CONST': '__greg64c',
        'IEM_MC_STORE_GREG_PAIR_U32': '__greg32',
        'IEM_MC_STORE_GREG_PAIR_U64': '__greg64',

        'IEM_MC_FETCH_SREG_U16': '__sreg16',
        'IEM_MC_FETCH_SREG_ZX_U32': '__sreg32',
        'IEM_MC_FETCH_SREG_ZX_U64': '__sreg64',
        'IEM_MC_FETCH_SREG_BASE_U64': '__sbase64',
        'IEM_MC_FETCH_SREG_BASE_U32': '__sbase32',
        'IEM_MC_STORE_SREG_BASE_U64': '__sbase64',
        'IEM_MC_STORE_SREG_BASE_U32': '__sbase32',

        'IEM_MC_REF_GREG_U8': '__greg8',
        'IEM_MC_REF_GREG_U16': '__greg16',
        'IEM_MC_REF_GREG_U32': '__greg32',
        'IEM_MC_REF_GREG_U64': '__greg64',
        'IEM_MC_REF_GREG_U8_CONST': '__greg8',
        'IEM_MC_REF_GREG_U16_CONST': '__greg16',
        'IEM_MC_REF_GREG_U32_CONST': '__greg32',
        'IEM_MC_REF_GREG_U64_CONST': '__greg64',
        'IEM_MC_REF_GREG_I32': '__greg32',
        'IEM_MC_REF_GREG_I64': '__greg64',
        'IEM_MC_REF_GREG_I32_CONST': '__greg32',
        'IEM_MC_REF_GREG_I64_CONST': '__greg64',

        'IEM_MC_STORE_FPUREG_R80_SRC_REF': '__fpu',
        'IEM_MC_REF_FPUREG': '__fpu',

        'IEM_MC_FETCH_MREG_U64': '__mreg64',
        'IEM_MC_FETCH_MREG_U32': '__mreg32',
        'IEM_MC_FETCH_MREG_U16': '__mreg16',
        'IEM_MC_FETCH_MREG_U8': '__mreg8',
        'IEM_MC_STORE_MREG_U64': '__mreg64',
        'IEM_MC_STORE_MREG_U32': '__mreg32',
        'IEM_MC_STORE_MREG_U16': '__mreg16',
        'IEM_MC_STORE_MREG_U8': '__mreg8',
        'IEM_MC_STORE_MREG_U32_ZX_U64': '__mreg32zx64',
        'IEM_MC_REF_MREG_U64': '__mreg64',
        'IEM_MC_REF_MREG_U64_CONST': '__mreg64',
        'IEM_MC_REF_MREG_U32_CONST': '__mreg32',

        'IEM_MC_CLEAR_XREG_U32_MASK': '__xreg32x4',
        'IEM_MC_FETCH_XREG_U128': '__xreg128',
        'IEM_MC_FETCH_XREG_XMM': '__xreg128',
        'IEM_MC_FETCH_XREG_U64': '__xreg64',
        'IEM_MC_FETCH_XREG_U32': '__xreg32',
        'IEM_MC_FETCH_XREG_U16': '__xreg16',
        'IEM_MC_FETCH_XREG_U8': '__xreg8',
        'IEM_MC_FETCH_XREG_PAIR_U128': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_XMM': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_RAX_RDX_U64': '__xreg128p',
        'IEM_MC_FETCH_XREG_PAIR_U128_AND_EAX_EDX_U32_SX_U64': '__xreg128p',

        'IEM_MC_STORE_XREG_U32_U128': '__xreg32',
        'IEM_MC_STORE_XREG_U128': '__xreg128',
        'IEM_MC_STORE_XREG_XMM': '__xreg128',
        'IEM_MC_STORE_XREG_XMM_U32': '__xreg32',
        'IEM_MC_STORE_XREG_XMM_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64': '__xreg64',
        'IEM_MC_STORE_XREG_U64_ZX_U128': '__xreg64zx128',
        'IEM_MC_STORE_XREG_U32': '__xreg32',
        'IEM_MC_STORE_XREG_U16': '__xreg16',
        'IEM_MC_STORE_XREG_U8': '__xreg8',
        'IEM_MC_STORE_XREG_U32_ZX_U128': '__xreg32zx128',
        'IEM_MC_STORE_XREG_R32': '__xreg32',
        'IEM_MC_STORE_XREG_R64': '__xreg64',
        'IEM_MC_BROADCAST_XREG_U8_ZX_VLMAX': '__xreg8zx',
        'IEM_MC_BROADCAST_XREG_U16_ZX_VLMAX': '__xreg16zx',
        'IEM_MC_BROADCAST_XREG_U32_ZX_VLMAX': '__xreg32zx',
        'IEM_MC_BROADCAST_XREG_U64_ZX_VLMAX': '__xreg64zx',
        'IEM_MC_BROADCAST_XREG_U128_ZX_VLMAX': '__xreg128zx',
        'IEM_MC_REF_XREG_U128': '__xreg128',
        'IEM_MC_REF_XREG_U128_CONST': '__xreg128',
        'IEM_MC_REF_XREG_U32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_U64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_R32_CONST': '__xreg32',
        'IEM_MC_REF_XREG_R64_CONST': '__xreg64',
        'IEM_MC_REF_XREG_XMM_CONST': '__xreg128',
        'IEM_MC_COPY_XREG_U128': '__xreg128',

        'IEM_MC_FETCH_YREG_U256': '__yreg256',
        'IEM_MC_FETCH_YREG_U128': '__yreg128',
        'IEM_MC_FETCH_YREG_U64': '__yreg64',
        'IEM_MC_FETCH_YREG_U32': '__yreg32',
        'IEM_MC_STORE_YREG_U128': '__yreg128',
        'IEM_MC_STORE_YREG_U32_ZX_VLMAX': '__yreg32zx',
        'IEM_MC_STORE_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_STORE_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_STORE_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_BROADCAST_YREG_U8_ZX_VLMAX': '__yreg8',
        'IEM_MC_BROADCAST_YREG_U16_ZX_VLMAX': '__yreg16',
        'IEM_MC_BROADCAST_YREG_U32_ZX_VLMAX': '__yreg32',
        'IEM_MC_BROADCAST_YREG_U64_ZX_VLMAX': '__yreg64',
        'IEM_MC_BROADCAST_YREG_U128_ZX_VLMAX': '__yreg128',
        'IEM_MC_REF_YREG_U128': '__yreg128',
        'IEM_MC_REF_YREG_U128_CONST': '__yreg128',
        'IEM_MC_REF_YREG_U64_CONST': '__yreg64',
        'IEM_MC_COPY_YREG_U256_ZX_VLMAX': '__yreg256zx',
        'IEM_MC_COPY_YREG_U128_ZX_VLMAX': '__yreg128zx',
        'IEM_MC_COPY_YREG_U64_ZX_VLMAX': '__yreg64zx',
        'IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX': '__yreg3296',
        'IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX': '__yreg6464',
        'IEM_MC_MERGE_YREG_U64HI_U64HI_ZX_VLMAX': '__yreg64hi64hi',
        'IEM_MC_MERGE_YREG_U64LO_U64LO_ZX_VLMAX': '__yreg64lo64lo',
        'IEM_MC_MERGE_YREG_U64LO_U64LOCAL_ZX_VLMAX':'__yreg64',
        'IEM_MC_MERGE_YREG_U64LOCAL_U64HI_ZX_VLMAX':'__yreg64',
    };
    ## Used by analyzeAndAnnotateName for naming by call type (CIMPL/AIMPL),
    ## both as a fallback and as a suffix appended to the operand-based name.
    kdAnnotateNameCallStmts = {
        'IEM_MC_CALL_CIMPL_0': '__cimpl',
        'IEM_MC_CALL_CIMPL_1': '__cimpl',
        'IEM_MC_CALL_CIMPL_2': '__cimpl',
        'IEM_MC_CALL_CIMPL_3': '__cimpl',
        'IEM_MC_CALL_CIMPL_4': '__cimpl',
        'IEM_MC_CALL_CIMPL_5': '__cimpl',
        'IEM_MC_CALL_CIMPL_6': '__cimpl',
        'IEM_MC_CALL_CIMPL_7': '__cimpl',
        'IEM_MC_DEFER_TO_CIMPL_0_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_1_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_2_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_3_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_4_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_5_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_6_RET': '__cimpl_defer',
        'IEM_MC_DEFER_TO_CIMPL_7_RET': '__cimpl_defer',
        'IEM_MC_CALL_VOID_AIMPL_0': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_1': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_2': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_3': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_4': '__aimpl',
        'IEM_MC_CALL_VOID_AIMPL_5': '__aimpl',
        'IEM_MC_CALL_AIMPL_0': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_1': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_2': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_3': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_4': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_5': '__aimpl_ret',
        'IEM_MC_CALL_AIMPL_6': '__aimpl_ret',
        'IEM_MC_CALL_VOID_AIMPL_6': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_0': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_1': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_2': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_3': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_4': '__aimpl_fpu',
        'IEM_MC_CALL_FPU_AIMPL_5': '__aimpl_fpu',
        'IEM_MC_CALL_MMX_AIMPL_0': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_1': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_2': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_3': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_4': '__aimpl_mmx',
        'IEM_MC_CALL_MMX_AIMPL_5': '__aimpl_mmx',
        'IEM_MC_CALL_SSE_AIMPL_0': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_1': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_2': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_3': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_4': '__aimpl_sse',
        'IEM_MC_CALL_SSE_AIMPL_5': '__aimpl_sse',
        'IEM_MC_CALL_AVX_AIMPL_0': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_1': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_2': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_3': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_4': '__aimpl_avx',
        'IEM_MC_CALL_AVX_AIMPL_5': '__aimpl_avx',
    };
1920 def analyzeAndAnnotateName(self, aoStmts: List[iai.McStmt]):
1921 """
1922 Scans the statements and variation lists for clues about the threaded function,
1923 and sets self.sSubName if successfull.
1924 """
1925 # Operand base naming:
1926 dHits = {};
1927 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameMemStmts, dHits);
1928 if cHits > 0:
1929 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1930 sName = self.kdAnnotateNameMemStmts[sStmtNm];
1931 else:
1932 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameRegStmts, dHits);
1933 if cHits > 0:
1934 sStmtNm = sorted(dHits.keys())[-1]; # priority: STORE, MEM_MAP, FETCH.
1935 sName = self.kdAnnotateNameRegStmts[sStmtNm];
1936 else:
1937 # No op details, try name it by call type...
1938 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1939 if cHits > 0:
1940 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1941 self.sSubName = self.kdAnnotateNameCallStmts[sStmtNm];
1942 return;
1943
1944 # Add call info if any:
1945 dHits = {};
1946 cHits = iai.McStmt.countStmtsByName(aoStmts, self.kdAnnotateNameCallStmts, dHits);
1947 if cHits > 0:
1948 sStmtNm = sorted(dHits.keys())[-1]; # Not really necessary to sort, but simple this way.
1949 sName += self.kdAnnotateNameCallStmts[sStmtNm][1:];
1950
1951 self.sSubName = sName;
1952 return;
1953
1954 def analyzeFindVariablesAndCallArgs(self, aoStmts: List[iai.McStmt]) -> bool:
1955 """ Scans the statements for MC variables and call arguments. """
1956 for oStmt in aoStmts:
1957 if isinstance(oStmt, iai.McStmtVar):
1958 if oStmt.sVarName in self.dVariables:
1959 raise Exception('Variable %s is defined more than once!' % (oStmt.sVarName,));
1960 self.dVariables[oStmt.sVarName] = oStmt.sVarName;
1961 elif isinstance(oStmt, iai.McStmtCall) and oStmt.sName.startswith('IEM_MC_CALL_AIMPL_'):
1962 if oStmt.asParams[1] in self.dVariables:
1963 raise Exception('Variable %s is defined more than once!' % (oStmt.asParams[1],));
1964 self.dVariables[oStmt.asParams[1]] = iai.McStmtVar('IEM_MC_LOCAL', oStmt.asParams[0:2],
1965 oStmt.asParams[0], oStmt.asParams[1]);
1966
1967 # There shouldn't be any variables or arguments declared inside if/
1968 # else blocks, but scan them too to be on the safe side.
1969 if isinstance(oStmt, iai.McStmtCond):
1970 #cBefore = len(self.dVariables);
1971 self.analyzeFindVariablesAndCallArgs(oStmt.aoIfBranch);
1972 self.analyzeFindVariablesAndCallArgs(oStmt.aoElseBranch);
1973 #if len(self.dVariables) != cBefore:
1974 # raise Exception('Variables/arguments defined in conditional branches!');
1975 return True;
1976
    ## Maps the finishing/return MC statement names to the corresponding
    ## g_ksFinishAnnotation_XXX constant; consulted by analyzeCodeOperation()
    ## when determining its return-style annotation.
    kdReturnStmtAnnotations = {
        'IEM_MC_ADVANCE_RIP_AND_FINISH': g_ksFinishAnnotation_Advance,
        'IEM_MC_REL_JMP_S8_AND_FINISH': g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S16_AND_FINISH': g_ksFinishAnnotation_RelJmp,
        'IEM_MC_REL_JMP_S32_AND_FINISH': g_ksFinishAnnotation_RelJmp,
        'IEM_MC_SET_RIP_U16_AND_FINISH': g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U32_AND_FINISH': g_ksFinishAnnotation_SetJmp,
        'IEM_MC_SET_RIP_U64_AND_FINISH': g_ksFinishAnnotation_SetJmp,
        'IEM_MC_REL_CALL_S16_AND_FINISH': g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S32_AND_FINISH': g_ksFinishAnnotation_RelCall,
        'IEM_MC_REL_CALL_S64_AND_FINISH': g_ksFinishAnnotation_RelCall,
        'IEM_MC_IND_CALL_U16_AND_FINISH': g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U32_AND_FINISH': g_ksFinishAnnotation_IndCall,
        'IEM_MC_IND_CALL_U64_AND_FINISH': g_ksFinishAnnotation_IndCall,
        'IEM_MC_DEFER_TO_CIMPL_0_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_1_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_2_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_3_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_4_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_5_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_6_RET': g_ksFinishAnnotation_DeferToCImpl,
        'IEM_MC_DEFER_TO_CIMPL_7_RET': g_ksFinishAnnotation_DeferToCImpl,
    };
    def analyzeCodeOperation(self, aoStmts: List[iai.McStmt], dEflStmts, fSeenConditional = False) -> bool:
        """
        Analyzes the code looking clues as to additional side-effects.

        Currently this is simply looking for branching and adding the relevant
        branch flags to dsCImplFlags.  ASSUMES the caller pre-populates the
        dictionary with a copy of self.oMcBlock.dsCImplFlags.

        This also sets McStmtCond.oIfBranchAnnotation & McStmtCond.oElseBranchAnnotation.

        Returns annotation on return style.
        """
        sAnnotation = None;
        for oStmt in aoStmts:
            # Set IEM_IMPL_C_F_BRANCH_XXXX flags if we see any branching MCs.
            if oStmt.sName.startswith('IEM_MC_SET_RIP'):
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_JMP'):
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                # Relative jumps are the only branch type allowed inside a conditional.
                if fSeenConditional:
                    self.dsCImplFlags['IEM_CIMPL_F_BRANCH_CONDITIONAL'] = True;
            elif oStmt.sName.startswith('IEM_MC_IND_CALL'):
                # Indirect call: branches indirectly, pushes on the stack, ends the TB.
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
            elif oStmt.sName.startswith('IEM_MC_REL_CALL'):
                # Relative call: relative branch, stack involvement, ends the TB.
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_RELATIVE'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;
            elif oStmt.sName.startswith('IEM_MC_RETN'):
                # Near return: indirect branch via the stack, ends the TB.
                assert not fSeenConditional;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_INDIRECT'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_BRANCH_STACK'] = True;
                self.dsCImplFlags['IEM_CIMPL_F_END_TB'] = True;

            # Check for CIMPL and AIMPL calls.
            if oStmt.sName.startswith('IEM_MC_CALL_'):
                if oStmt.sName.startswith('IEM_MC_CALL_CIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_CIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_VOID_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL'] = True;
                elif (   oStmt.sName.startswith('IEM_MC_CALL_SSE_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_MMX_AIMPL_')
                      or oStmt.sName.startswith('IEM_MC_CALL_FPU_AIMPL_')):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE'] = True;
                elif oStmt.sName.startswith('IEM_MC_CALL_AVX_AIMPL_'):
                    self.dsCImplFlags['IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE'] = True;
                else:
                    raise Exception('Unknown IEM_MC_CALL_* statement: %s' % (oStmt.sName,));

            # Check for return statements.  At most one finisher per statement list.
            if oStmt.sName in self.kdReturnStmtAnnotations:
                assert sAnnotation is None;
                sAnnotation = self.kdReturnStmtAnnotations[oStmt.sName];

            # Collect MCs working on EFLAGS.  Caller will check this.
            if oStmt.sName in ('IEM_MC_FETCH_EFLAGS', 'IEM_MC_FETCH_EFLAGS_U8', 'IEM_MC_COMMIT_EFLAGS',
                               'IEM_MC_COMMIT_EFLAGS_OPT', 'IEM_MC_REF_EFLAGS', 'IEM_MC_ARG_LOCAL_EFLAGS', ):
                dEflStmts[oStmt.sName] = oStmt;
            elif isinstance(oStmt, iai.McStmtCall):
                # CIMPL calls touching RFLAGS/status flags also count as EFLAGS users.
                if oStmt.sName in ('IEM_MC_CALL_CIMPL_0', 'IEM_MC_CALL_CIMPL_1', 'IEM_MC_CALL_CIMPL_2',
                                   'IEM_MC_CALL_CIMPL_3', 'IEM_MC_CALL_CIMPL_4', 'IEM_MC_CALL_CIMPL_5',):
                    if (   oStmt.asParams[0].find('IEM_CIMPL_F_RFLAGS') >= 0
                        or oStmt.asParams[0].find('IEM_CIMPL_F_STATUS_FLAGS') >= 0):
                        dEflStmts[oStmt.sName] = oStmt;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                oStmt.oIfBranchAnnotation = self.analyzeCodeOperation(oStmt.aoIfBranch, dEflStmts, True);
                if oStmt.aoElseBranch:
                    oStmt.oElseBranchAnnotation = self.analyzeCodeOperation(oStmt.aoElseBranch, dEflStmts, True);

        return sAnnotation;
2077
2078 def analyzeThreadedFunction(self, oGenerator):
2079 """
2080 Analyzes the code, identifying the number of parameters it requires and such.
2081
2082 Returns dummy True - raises exception on trouble.
2083 """
2084
2085 #
2086 # Decode the block into a list/tree of McStmt objects.
2087 #
2088 aoStmts = self.oMcBlock.decode();
2089
2090 #
2091 # Check the block for errors before we proceed (will decode it).
2092 #
2093 asErrors = self.oMcBlock.check();
2094 if asErrors:
2095 raise Exception('\n'.join(['%s:%s: error: %s' % (self.oMcBlock.sSrcFile, self.oMcBlock.iBeginLine, sError, )
2096 for sError in asErrors]));
2097
2098 #
2099 # Scan the statements for local variables and call arguments (self.dVariables).
2100 #
2101 self.analyzeFindVariablesAndCallArgs(aoStmts);
2102
2103 #
2104 # Scan the code for IEM_CIMPL_F_ and other clues.
2105 #
2106 self.dsCImplFlags = self.oMcBlock.dsCImplFlags.copy();
2107 dEflStmts = {};
2108 self.analyzeCodeOperation(aoStmts, dEflStmts);
2109 if ( ('IEM_CIMPL_F_CALLS_CIMPL' in self.dsCImplFlags)
2110 + ('IEM_CIMPL_F_CALLS_AIMPL' in self.dsCImplFlags)
2111 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE' in self.dsCImplFlags)
2112 + ('IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE' in self.dsCImplFlags) > 1):
2113 self.error('Mixing CIMPL/AIMPL/AIMPL_WITH_FXSTATE/AIMPL_WITH_XSTATE calls', oGenerator);
2114
2115 #
2116 # Analyse EFLAGS related MCs and @opflmodify and friends.
2117 #
2118 if dEflStmts:
2119 oInstruction = self.oMcBlock.oInstruction; # iai.Instruction
2120 if ( oInstruction is None
2121 or (oInstruction.asFlTest is None and oInstruction.asFlModify is None)):
2122 sMcNames = '+'.join(dEflStmts.keys());
2123 if len(dEflStmts) != 1 or not sMcNames.startswith('IEM_MC_CALL_CIMPL_'): # Hack for far calls
2124 self.error('Uses %s but has no @opflmodify, @opfltest or @opflclass with details!' % (sMcNames,), oGenerator);
2125 elif 'IEM_MC_COMMIT_EFLAGS' in dEflStmts or 'IEM_MC_COMMIT_EFLAGS_OPT' in dEflStmts:
2126 if not oInstruction.asFlModify:
2127 if oInstruction.sMnemonic not in [ 'not', ]:
2128 self.error('Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!', oGenerator);
2129 elif ( 'IEM_MC_CALL_CIMPL_0' in dEflStmts
2130 or 'IEM_MC_CALL_CIMPL_1' in dEflStmts
2131 or 'IEM_MC_CALL_CIMPL_2' in dEflStmts
2132 or 'IEM_MC_CALL_CIMPL_3' in dEflStmts
2133 or 'IEM_MC_CALL_CIMPL_4' in dEflStmts
2134 or 'IEM_MC_CALL_CIMPL_5' in dEflStmts ):
2135 if not oInstruction.asFlModify:
2136 self.error('Uses IEM_MC_CALL_CIMPL_x or IEM_MC_DEFER_TO_CIMPL_5_RET with IEM_CIMPL_F_STATUS_FLAGS '
2137 'or IEM_CIMPL_F_RFLAGS but has no flags in @opflmodify!', oGenerator);
2138 elif 'IEM_MC_REF_EFLAGS' not in dEflStmts:
2139 if not oInstruction.asFlTest:
2140 if oInstruction.sMnemonic not in [ 'not', ]:
2141 self.error('Expected @opfltest!', oGenerator);
2142 if oInstruction and oInstruction.asFlSet:
2143 for sFlag in oInstruction.asFlSet:
2144 if sFlag not in oInstruction.asFlModify:
2145 self.error('"%s" in @opflset but missing from @opflmodify (%s)!'
2146 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2147 if oInstruction and oInstruction.asFlClear:
2148 for sFlag in oInstruction.asFlClear:
2149 if sFlag not in oInstruction.asFlModify:
2150 self.error('"%s" in @opflclear but missing from @opflmodify (%s)!'
2151 % (sFlag, ', '.join(oInstruction.asFlModify)), oGenerator);
2152
2153 #
2154 # Create variations as needed.
2155 #
2156 if iai.McStmt.findStmtByNames(aoStmts,
2157 { 'IEM_MC_DEFER_TO_CIMPL_0_RET': True,
2158 'IEM_MC_DEFER_TO_CIMPL_1_RET': True,
2159 'IEM_MC_DEFER_TO_CIMPL_2_RET': True,
2160 'IEM_MC_DEFER_TO_CIMPL_3_RET': True, }):
2161 asVariations = (ThreadedFunctionVariation.ksVariation_Default,);
2162
2163 elif iai.McStmt.findStmtByNames(aoStmts, { 'IEM_MC_CALC_RM_EFF_ADDR' : True,
2164 'IEM_MC_FETCH_MEM_U8' : True, # mov_AL_Ob ++
2165 'IEM_MC_FETCH_MEM_U16' : True, # mov_rAX_Ov ++
2166 'IEM_MC_FETCH_MEM_U32' : True,
2167 'IEM_MC_FETCH_MEM_U64' : True,
2168 'IEM_MC_STORE_MEM_U8' : True, # mov_Ob_AL ++
2169 'IEM_MC_STORE_MEM_U16' : True, # mov_Ov_rAX ++
2170 'IEM_MC_STORE_MEM_U32' : True,
2171 'IEM_MC_STORE_MEM_U64' : True, }):
2172 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2173 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressOnly64;
2174 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2175 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286Not64;
2176 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2177 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot286;
2178 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2179 asVariations = ThreadedFunctionVariation.kasVariationsWithAddressNot64;
2180 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2181 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2182 else:
2183 asVariations = ThreadedFunctionVariation.kasVariationsWithAddress;
2184 else:
2185 if 'IEM_MC_F_64BIT' in self.oMcBlock.dsMcFlags:
2186 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressOnly64;
2187 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags and 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2188 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286Not64;
2189 elif 'IEM_MC_F_NOT_286_OR_OLDER' in self.oMcBlock.dsMcFlags:
2190 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot286;
2191 elif 'IEM_MC_F_NOT_64BIT' in self.oMcBlock.dsMcFlags:
2192 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddressNot64;
2193 elif 'IEM_MC_F_ONLY_8086' in self.oMcBlock.dsMcFlags:
2194 asVariations = ThreadedFunctionVariation.kasVariationsOnlyPre386;
2195 else:
2196 asVariations = ThreadedFunctionVariation.kasVariationsWithoutAddress;
2197
2198 if ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2199 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags): # (latter to avoid iemOp_into)
2200 assert set(asVariations).issubset(ThreadedFunctionVariation.kasVariationsWithoutAddress), \
2201 '%s: vars=%s McFlags=%s' % (self.oMcBlock.oFunction.sName, asVariations, self.oMcBlock.dsMcFlags);
2202 asVariationsBase = asVariations;
2203 asVariations = [];
2204 for sVariation in asVariationsBase:
2205 asVariations.extend([sVariation + '_Jmp', sVariation + '_NoJmp']);
2206 assert set(asVariations).issubset(ThreadedFunctionVariation.kdVariationsWithConditional);
2207
2208 if not iai.McStmt.findStmtByNames(aoStmts,
2209 { 'IEM_MC_ADVANCE_RIP_AND_FINISH': True,
2210 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
2211 'IEM_MC_REL_JMP_S16_AND_FINISH': True,
2212 'IEM_MC_REL_JMP_S32_AND_FINISH': True,
2213 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
2214 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
2215 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
2216 'IEM_MC_REL_CALL_S16_AND_FINISH': True,
2217 'IEM_MC_REL_CALL_S32_AND_FINISH': True,
2218 'IEM_MC_REL_CALL_S64_AND_FINISH': True,
2219 'IEM_MC_IND_CALL_U16_AND_FINISH': True,
2220 'IEM_MC_IND_CALL_U32_AND_FINISH': True,
2221 'IEM_MC_IND_CALL_U64_AND_FINISH': True,
2222 'IEM_MC_RETN_AND_FINISH': True,
2223 }):
2224 asVariations = [sVariation for sVariation in asVariations
2225 if sVariation not in ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing];
2226
2227 self.aoVariations = [ThreadedFunctionVariation(self, sVar) for sVar in asVariations];
2228
2229 # Dictionary variant of the list.
2230 self.dVariations = { oVar.sVariation: oVar for oVar in self.aoVariations };
2231
2232 #
2233 # Try annotate the threaded function name.
2234 #
2235 self.analyzeAndAnnotateName(aoStmts);
2236
2237 #
2238 # Continue the analysis on each variation.
2239 #
2240 for oVariation in self.aoVariations:
2241 oVariation.analyzeVariation(aoStmts);
2242
2243 return True;
2244
    ## Used by emitThreadedCallStmts.
    ## Set of variations whose selection depends on instruction prefix state
    ## (address-size override and/or segment prefix), meaning the generated
    ## switch value must include the extra prefix bits (8 and 16) and cannot
    ## use the simple execution-mode-only switch.
    kdVariationsWithNeedForPrefixCheck = {
        ThreadedFunctionVariation.ksVariation_64_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64f_Addr32: True,
        ThreadedFunctionVariation.ksVariation_64_FsGs: True,
        ThreadedFunctionVariation.ksVariation_64f_FsGs: True,
        ThreadedFunctionVariation.ksVariation_32_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32f_Addr16: True,
        ThreadedFunctionVariation.ksVariation_32_Flat: True,
        ThreadedFunctionVariation.ksVariation_32f_Flat: True,
        ThreadedFunctionVariation.ksVariation_16_Addr32: True,
        ThreadedFunctionVariation.ksVariation_16f_Addr32: True,
    };
2258
    def emitThreadedCallStmts(self, sBranch = None, fTbLookupTable = False): # pylint: disable=too-many-statements
        """
        Worker for morphInputCode that returns a list of statements that emits
        the call to the threaded functions for the block.

        The sBranch parameter is used with conditional branches where we'll emit
        different threaded calls depending on whether we're in the jump-taken or
        no-jump code path.

        The fTbLookupTable parameter can either be False, True or whatever else
        (like 2) - in the latter case this means a large lookup table.
        """
        # Special case for only default variation:
        if len(self.aoVariations) == 1 and self.aoVariations[0].sVariation == ThreadedFunctionVariation.ksVariation_Default:
            assert not sBranch;
            return self.aoVariations[0].emitThreadedCallStmtsForVariant(0, fTbLookupTable);

        #
        # Case statement sub-class.
        #
        # Each instance represents one 'case' label in the generated switch
        # statement; sVarNm being None means an empty case falling thru to the
        # next one.
        #
        dByVari = self.dVariations;
        #fDbg = self.oMcBlock.sFunction == 'iemOpCommonPushSReg';
        class Case:
            def __init__(self, sCond, sVarNm = None):
                self.sCond = sCond;     ##< The case label / condition expression.
                self.sVarNm = sVarNm;   ##< Variation name, None for fall-thru cases.
                self.oVar = dByVari[sVarNm] if sVarNm else None;
                # Pre-rendered case body (at C indent level 8); None for fall-thru cases.
                self.aoBody = self.oVar.emitThreadedCallStmtsForVariant(8, fTbLookupTable) if sVarNm else None;

            def toCode(self):
                """ Renders the case label, body and 'break;' as a statement list. """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend(self.aoBody);
                    aoStmts.append(iai.McCppGeneric('break;', cchIndent = 8));
                return aoStmts;

            def toFunctionAssignment(self):
                """ Renders the case as an 'enmFunction = <index>;' assignment, used when all case bodies are identical. """
                aoStmts = [ iai.McCppGeneric('case %s:' % (self.sCond), cchIndent = 4), ];
                if self.aoBody:
                    aoStmts.extend([
                        iai.McCppGeneric('enmFunction = %s;' % (self.oVar.getIndexName(),), cchIndent = 8),
                        iai.McCppGeneric('break;', cchIndent = 8),
                    ]);
                return aoStmts;

            def isSame(self, oThat):
                """ Compares case bodies, ignoring only the threaded function index parameter of IEM_MC2_EMIT_CALL_* invocations. """
                if not self.aoBody: # fall thru always matches.
                    return True;
                if len(self.aoBody) != len(oThat.aoBody):
                    #if fDbg: print('dbg: body len diff: %s vs %s' % (len(self.aoBody), len(oThat.aoBody),));
                    return False;
                for iStmt, oStmt in enumerate(self.aoBody):
                    oThatStmt = oThat.aoBody[iStmt] # type: iai.McStmt
                    assert isinstance(oStmt, iai.McCppGeneric);
                    assert not isinstance(oStmt, iai.McStmtCond);
                    if isinstance(oStmt, iai.McStmtCond): # safety net for when asserts are disabled (-O).
                        return False;
                    if oStmt.sName != oThatStmt.sName:
                        #if fDbg: print('dbg: stmt #%s name: %s vs %s' % (iStmt, oStmt.sName, oThatStmt.sName,));
                        return False;
                    if len(oStmt.asParams) != len(oThatStmt.asParams):
                        #if fDbg: print('dbg: stmt #%s param count: %s vs %s'
                        #               % (iStmt, len(oStmt.asParams), len(oThatStmt.asParams),));
                        return False;
                    for iParam, sParam in enumerate(oStmt.asParams):
                        # All parameters must match, except the function index argument
                        # (parameter #1) of IEM_MC2_EMIT_CALL_* calls.
                        if ( sParam != oThatStmt.asParams[iParam]
                            and ( iParam != 1
                                 or not isinstance(oStmt, iai.McCppCall)
                                 or not oStmt.asParams[0].startswith('IEM_MC2_EMIT_CALL_')
                                 or sParam != self.oVar.getIndexName()
                                 or oThatStmt.asParams[iParam] != oThat.oVar.getIndexName() )):
                            #if fDbg: print('dbg: stmt #%s, param #%s: %s vs %s'
                            #               % (iStmt, iParam, sParam, oThatStmt.asParams[iParam],));
                            return False;
                return True;

        #
        # Determine what we're switch on.
        # This ASSUMES that (IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7!
        #
        # Extra bits OR'ed into the switch value by the code below:
        #   8  - effective address mode differs from the CPU mode (address size prefix);
        #   16 - effective segment is FS, GS or CS (non-flat access penalty);
        #   32 - eflags checking/clearing variation required (pending RFLAGS/debug state).
        #
        fSimple = True;
        sSwitchValue = '(pVCpu->iem.s.fExec & (IEM_F_MODE_CPUMODE_MASK | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK))';
        if dByVari.keys() & self.kdVariationsWithNeedForPrefixCheck.keys():
            sSwitchValue += ' | (pVCpu->iem.s.enmEffAddrMode == (pVCpu->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK) ? 0 : 8)';
            # Accesses via FS and GS and CS goes thru non-FLAT functions. (CS
            # is not writable in 32-bit mode (at least), thus the penalty mode
            # for any accesses via it (simpler this way).)
            sSwitchValue += ' | (pVCpu->iem.s.iEffSeg < X86_SREG_FS && pVCpu->iem.s.iEffSeg != X86_SREG_CS ? 0 : 16)';
            fSimple = False; # threaded functions.
        if dByVari.keys() & ThreadedFunctionVariation.kdVariationsWithEflagsCheckingAndClearing:
            sSwitchValue += ' | ((pVCpu->iem.s.fTbPrevInstr & (IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_INHIBIT_SHADOW)) || ' \
                          + '(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_MASK) ? 32 : 0)';

        #
        # Generate the case statements.
        #
        # pylintx: disable=x
        aoCases = [];
        if ThreadedFunctionVariation.ksVariation_64_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64),
                Case('IEMMODE_64BIT | 16', ThrdFnVar.ksVariation_64_FsGs),
                Case('IEMMODE_64BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_64BIT | 8', ThrdFnVar.ksVariation_64_Addr32),
            ]);
            if ThreadedFunctionVariation.ksVariation_64f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f),
                    Case('IEMMODE_64BIT | 32 | 16', ThrdFnVar.ksVariation_64f_FsGs),
                    Case('IEMMODE_64BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_64BIT | 32 | 8', ThrdFnVar.ksVariation_64f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_64 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_64BIT', ThrdFnVar.ksVariation_64));
            if ThreadedFunctionVariation.ksVariation_64f in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32', ThrdFnVar.ksVariation_64f));
        elif ThrdFnVar.ksVariation_64_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_64BIT',
                                ThrdFnVar.ksVariation_64_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64_NoJmp));
            if ThreadedFunctionVariation.ksVariation_64f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_64BIT | 32',
                                    ThrdFnVar.ksVariation_64f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_64f_NoJmp));

        if ThrdFnVar.ksVariation_32_Addr16 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_32_Flat),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None), # fall thru
                Case('IEMMODE_32BIT | 16', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8', None), # fall thru
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8 | 16',None), # fall thru
                Case('IEMMODE_32BIT | 8', ThrdFnVar.ksVariation_32_Addr16),
            ]);
            if ThrdFnVar.ksVariation_32f_Addr16 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_32f_Flat),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8', None), # fall thru
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8 | 16',None), # fall thru
                    Case('IEMMODE_32BIT | 32 | 8', ThrdFnVar.ksVariation_32f_Addr16),
                ]);
        elif ThrdFnVar.ksVariation_32 in dByVari:
            assert fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT', ThrdFnVar.ksVariation_32),
            ]);
            if ThrdFnVar.ksVariation_32f in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32', ThrdFnVar.ksVariation_32f),
                ]);
        elif ThrdFnVar.ksVariation_32_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.extend([
                Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', None), # fall thru
                Case('IEMMODE_32BIT',
                     ThrdFnVar.ksVariation_32_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32_NoJmp),
            ]);
            if ThrdFnVar.ksVariation_32f_Jmp in dByVari:
                aoCases.extend([
                    Case('IEMMODE_32BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', None), # fall thru
                    Case('IEMMODE_32BIT | 32',
                         ThrdFnVar.ksVariation_32f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_32f_NoJmp),
                ]);

        if ThrdFnVar.ksVariation_16_Addr32 in dByVari:
            assert not fSimple and not sBranch;
            aoCases.extend([
                Case('IEMMODE_16BIT | 16', None), # fall thru
                Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16),
                Case('IEMMODE_16BIT | 8 | 16', None), # fall thru
                Case('IEMMODE_16BIT | 8', ThrdFnVar.ksVariation_16_Addr32),
            ]);
            if ThrdFnVar.ksVariation_16f_Addr32 in dByVari:
                aoCases.extend([
                    Case('IEMMODE_16BIT | 32 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f),
                    Case('IEMMODE_16BIT | 32 | 8 | 16', None), # fall thru
                    Case('IEMMODE_16BIT | 32 | 8', ThrdFnVar.ksVariation_16f_Addr32),
                ]);
        elif ThrdFnVar.ksVariation_16 in dByVari:
            assert fSimple and not sBranch;
            aoCases.append(Case('IEMMODE_16BIT', ThrdFnVar.ksVariation_16));
            if ThrdFnVar.ksVariation_16f in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32', ThrdFnVar.ksVariation_16f));
        elif ThrdFnVar.ksVariation_16_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT',
                                ThrdFnVar.ksVariation_16_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16_NoJmp));
            if ThrdFnVar.ksVariation_16f_Jmp in dByVari:
                aoCases.append(Case('IEMMODE_16BIT | 32',
                                    ThrdFnVar.ksVariation_16f_Jmp if sBranch == 'Jmp' else ThrdFnVar.ksVariation_16f_NoJmp));


        if ThrdFnVar.ksVariation_16_Pre386 in dByVari:
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK', ThrdFnVar.ksVariation_16_Pre386));
        if ThrdFnVar.ksVariation_16f_Pre386 in dByVari: # should be nested under previous if, but line too long.
            if not fSimple:
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32 | 16', None)); # fall thru
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32', ThrdFnVar.ksVariation_16f_Pre386));

        if ThrdFnVar.ksVariation_16_Pre386_Jmp in dByVari:
            assert fSimple and sBranch;
            aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK',
                                ThrdFnVar.ksVariation_16_Pre386_Jmp if sBranch == 'Jmp'
                                else ThrdFnVar.ksVariation_16_Pre386_NoJmp));
            if ThrdFnVar.ksVariation_16f_Pre386_Jmp in dByVari:
                assert fSimple and sBranch;
                aoCases.append(Case('IEMMODE_16BIT | IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | 32',
                                    ThrdFnVar.ksVariation_16f_Pre386_Jmp if sBranch == 'Jmp'
                                    else ThrdFnVar.ksVariation_16f_Pre386_NoJmp));

        #
        # If the case bodies are all the same, except for the function called,
        # we can reduce the code size and hopefully compile time.
        #
        iFirstCaseWithBody = 0;
        while not aoCases[iFirstCaseWithBody].aoBody:
            iFirstCaseWithBody += 1
        fAllSameCases = True
        for iCase in range(iFirstCaseWithBody + 1, len(aoCases)):
            fAllSameCases = fAllSameCases and aoCases[iCase].isSame(aoCases[iFirstCaseWithBody]);
        #if fDbg: print('fAllSameCases=%s %s' % (fAllSameCases, self.oMcBlock.sFunction,));
        if fAllSameCases:
            # The switch only picks the function index; a single shared copy of
            # the call statements follows it.
            aoStmts = [
                iai.McCppGeneric('IEMTHREADEDFUNCS enmFunction;'),
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toFunctionAssignment());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);
            aoStmts.extend(dByVari[aoCases[iFirstCaseWithBody].sVarNm].emitThreadedCallStmtsForVariant(0, fTbLookupTable,
                                                                                                       'enmFunction'));

        else:
            #
            # Generate the generic switch statement.
            #
            aoStmts = [
                iai.McCppGeneric('switch (%s)' % (sSwitchValue,)),
                iai.McCppGeneric('{'),
            ];
            for oCase in aoCases:
                aoStmts.extend(oCase.toCode());
            aoStmts.extend([
                iai.McCppGeneric('IEM_NOT_REACHED_DEFAULT_CASE_RET();', cchIndent = 4),
                iai.McCppGeneric('}'),
            ]);

        return aoStmts;
2524
    def morphInputCode(self, aoStmts, fIsConditional = False, fCallEmitted = False, cDepth = 0, sBranchAnnotation = None):
        """
        Adjusts (& copies) the statements for the input/decoder so it will emit
        calls to the right threaded functions for each block.

        Parameters:
            aoStmts           - Statement list to morph (not modified).
            fIsConditional    - Whether this block is a conditional relative branch.
            fCallEmitted      - Whether the threaded call was already emitted on
                                this code path (recursion state).
            cDepth            - Recursion depth, zero for the outermost call.
            sBranchAnnotation - Branch annotation from the enclosing conditional,
                                if any.

        Returns list/tree of statements (aoStmts is not modified) and updated
        fCallEmitted status.
        """
        #print('McBlock at %s:%s' % (os.path.split(self.oMcBlock.sSrcFile)[1], self.oMcBlock.iBeginLine,));
        aoDecoderStmts = [];

        for iStmt, oStmt in enumerate(aoStmts):
            # Copy the statement. Make a deep copy to make sure we've got our own
            # copies of all instance variables, even if a bit overkill at the moment.
            oNewStmt = copy.deepcopy(oStmt);
            aoDecoderStmts.append(oNewStmt);
            #print('oNewStmt %s %s' % (oNewStmt.sName, len(oNewStmt.asParams),));
            # Patch the IEM_MC_BEGIN flags parameter to carry the CIMPL flags.
            if oNewStmt.sName == 'IEM_MC_BEGIN' and self.dsCImplFlags:
                oNewStmt.asParams[1] = ' | '.join(sorted(self.dsCImplFlags.keys()));

            # If we haven't emitted the threaded function call yet, look for
            # statements which it would naturally follow or preceed.
            if not fCallEmitted:
                if not oStmt.isCppStmt():
                    # Raise/finish/CIMPL statements: insert the call(s) just
                    # before the statement (pop it, emit calls, re-append it).
                    if ( oStmt.sName.startswith('IEM_MC_MAYBE_RAISE_') \
                        or (oStmt.sName.endswith('_AND_FINISH') and oStmt.sName.startswith('IEM_MC_'))
                        or oStmt.sName.startswith('IEM_MC_CALL_CIMPL_')
                        or oStmt.sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
                        or oStmt.sName in ('IEM_MC_RAISE_DIVIDE_ERROR',)):
                        aoDecoderStmts.pop();
                        if not fIsConditional:
                            aoDecoderStmts.extend(self.emitThreadedCallStmts());
                        elif oStmt.sName == 'IEM_MC_ADVANCE_RIP_AND_FINISH':
                            # Advancing RIP is the no-jump path of a conditional branch.
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        else:
                            # Relative jump is the jump-taken path of a conditional branch.
                            assert oStmt.sName in { 'IEM_MC_REL_JMP_S8_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S16_AND_FINISH': True,
                                                    'IEM_MC_REL_JMP_S32_AND_FINISH': True, };
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                    elif iai.g_dMcStmtParsers[oStmt.sName][2]:
                        # This is for Jmp/NoJmp with loopne and friends which modifies state other than RIP.
                        if not sBranchAnnotation:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        assert fIsConditional;
                        aoDecoderStmts.pop();
                        if sBranchAnnotation == g_ksFinishAnnotation_Advance:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:], {'IEM_MC_ADVANCE_RIP_AND_FINISH':1,})
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('NoJmp', True));
                        elif sBranchAnnotation == g_ksFinishAnnotation_RelJmp:
                            assert iai.McStmt.findStmtByNames(aoStmts[iStmt:],
                                                              { 'IEM_MC_REL_JMP_S8_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S16_AND_FINISH': 1,
                                                                'IEM_MC_REL_JMP_S32_AND_FINISH': 1, });
                            aoDecoderStmts.extend(self.emitThreadedCallStmts('Jmp', True));
                        else:
                            self.raiseProblem('Modifying state before emitting calls! %s' % (oStmt.sName,));
                        aoDecoderStmts.append(oNewStmt);
                        fCallEmitted = True;

                elif ( not fIsConditional
                      and oStmt.fDecode
                      and ( oStmt.asParams[0].find('IEMOP_HLP_DONE_') >= 0
                           or oStmt.asParams[0].find('IEMOP_HLP_DECODED_') >= 0)):
                    # C++ decode-done helper macros: emit the call right after them.
                    aoDecoderStmts.extend(self.emitThreadedCallStmts());
                    fCallEmitted = True;

            # Process branches of conditionals recursively.
            if isinstance(oStmt, iai.McStmtCond):
                (oNewStmt.aoIfBranch, fCallEmitted1) = self.morphInputCode(oStmt.aoIfBranch, fIsConditional,
                                                                          fCallEmitted, cDepth + 1, oStmt.oIfBranchAnnotation);
                if oStmt.aoElseBranch:
                    (oNewStmt.aoElseBranch, fCallEmitted2) = self.morphInputCode(oStmt.aoElseBranch, fIsConditional,
                                                                                fCallEmitted, cDepth + 1,
                                                                                oStmt.oElseBranchAnnotation);
                else:
                    fCallEmitted2 = False;
                # The call only counts as emitted if both branches emitted it.
                fCallEmitted = fCallEmitted or (fCallEmitted1 and fCallEmitted2);

        if not fCallEmitted and cDepth == 0:
            self.raiseProblem('Unable to insert call to threaded function.');

        return (aoDecoderStmts, fCallEmitted);
2610
2611
2612 def generateInputCode(self):
2613 """
2614 Modifies the input code.
2615 """
2616 cchIndent = (self.oMcBlock.cchIndent + 3) // 4 * 4;
2617
2618 if len(self.oMcBlock.aoStmts) == 1:
2619 # IEM_MC_DEFER_TO_CIMPL_X_RET - need to wrap in {} to make it safe to insert into random code.
2620 sCode = ' ' * cchIndent + 'pVCpu->iem.s.fTbCurInstr = ';
2621 if self.dsCImplFlags:
2622 sCode += ' | '.join(sorted(self.dsCImplFlags.keys())) + ';\n';
2623 else:
2624 sCode += '0;\n';
2625 sCode += iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts)[0],
2626 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2627 sIndent = ' ' * (min(cchIndent, 2) - 2);
2628 sCode = sIndent + '{\n' + sCode + sIndent + '}\n';
2629 return sCode;
2630
2631 # IEM_MC_BEGIN/END block
2632 assert len(self.oMcBlock.asLines) > 2, "asLines=%s" % (self.oMcBlock.asLines,);
2633 fIsConditional = ( 'IEM_CIMPL_F_BRANCH_CONDITIONAL' in self.dsCImplFlags
2634 and 'IEM_CIMPL_F_BRANCH_RELATIVE' in self.dsCImplFlags); # (latter to avoid iemOp_into)
2635 return iai.McStmt.renderCodeForList(self.morphInputCode(self.oMcBlock.aoStmts, fIsConditional)[0],
2636 cchIndent = cchIndent).replace('\n', ' /* gen */\n', 1);
2637
# Short alias for ThreadedFunctionVariation, keeping the long case tables in
# emitThreadedCallStmts within the line length limit.
ThrdFnVar = ThreadedFunctionVariation;
2640
2641
2642class IEMThreadedGenerator(object):
2643 """
2644 The threaded code generator & annotator.
2645 """
2646
2647 def __init__(self):
2648 self.aoThreadedFuncs = [] # type: List[ThreadedFunction]
2649 self.oOptions = None # type: argparse.Namespace
2650 self.aoParsers = [] # type: List[IEMAllInstPython.SimpleParser]
2651 self.aidxFirstFunctions = [] # type: List[int] ##< Runs parallel to aoParser giving the index of the first function.
2652 self.cErrors = 0;
2653
2654 #
2655 # Error reporting.
2656 #
2657
2658 def rawError(self, sCompleteMessage):
2659 """ Output a raw error and increment the error counter. """
2660 print(sCompleteMessage, file = sys.stderr);
2661 self.cErrors += 1;
2662 return False;
2663
2664 #
2665 # Processing.
2666 #
2667
    def processInputFiles(self, sHostArch, fNativeRecompilerEnabled):
        """
        Process the input files.

        Parses the instruction files, creates + analyzes the threaded function
        for each MC block, disambiguates colliding sub-names, populates
        aidxFirstFunctions, optionally runs the native recompiler analysis, and
        sanity checks variable/argument usage.

        Returns True on success, False if any errors were reported (cErrors).
        """

        # Parse the files.
        self.aoParsers = iai.parseFiles(self.oOptions.asInFiles, sHostArch);

        # Create threaded functions for the MC blocks.
        self.aoThreadedFuncs = [ThreadedFunction(oMcBlock) for oMcBlock in iai.g_aoMcBlocks];

        # Analyze the threaded functions, collecting parameter count statistics
        # (raw reference count vs minimum after optimization) as we go.
        dRawParamCounts = {};
        dMinParamCounts = {};
        for oThreadedFunction in self.aoThreadedFuncs:
            oThreadedFunction.analyzeThreadedFunction(self);
            for oVariation in oThreadedFunction.aoVariations:
                dRawParamCounts[len(oVariation.dParamRefs)] = dRawParamCounts.get(len(oVariation.dParamRefs), 0) + 1;
                dMinParamCounts[oVariation.cMinParams] = dMinParamCounts.get(oVariation.cMinParams, 0) + 1;
        print('debug: param count distribution, raw and optimized:', file = sys.stderr);
        for cCount in sorted({cBits: True for cBits in list(dRawParamCounts.keys()) + list(dMinParamCounts.keys())}.keys()):
            print('debug: %s params: %4s raw, %4s min'
                  % (cCount, dRawParamCounts.get(cCount, 0), dMinParamCounts.get(cCount, 0)),
                  file = sys.stderr);

        # Do another pass over the threaded functions to settle the name suffix.
        # If a decoder function has more MC blocks than distinct sub-names, the
        # sub-names collide and get a numeric suffix appended to disambiguate.
        iThreadedFn = 0;
        while iThreadedFn < len(self.aoThreadedFuncs):
            oFunction = self.aoThreadedFuncs[iThreadedFn].oMcBlock.oFunction;
            assert oFunction;
            iThreadedFnNext = iThreadedFn + 1;
            dSubNames = { self.aoThreadedFuncs[iThreadedFn].sSubName: 1 };
            while ( iThreadedFnNext < len(self.aoThreadedFuncs)
                   and self.aoThreadedFuncs[iThreadedFnNext].oMcBlock.oFunction == oFunction):
                dSubNames[self.aoThreadedFuncs[iThreadedFnNext].sSubName] = 1;
                iThreadedFnNext += 1;
            if iThreadedFnNext - iThreadedFn > len(dSubNames):
                iSubName = 0;
                while iThreadedFn + iSubName < iThreadedFnNext:
                    self.aoThreadedFuncs[iThreadedFn + iSubName].sSubName += '_%s' % (iSubName,);
                    iSubName += 1;
            iThreadedFn = iThreadedFnNext;

        # Populate aidxFirstFunctions. This is ASSUMING that
        # g_aoMcBlocks/self.aoThreadedFuncs are in self.aoParsers order.
        iThreadedFunction = 0;
        oThreadedFunction = self.getThreadedFunctionByIndex(0);
        self.aidxFirstFunctions = [];
        for oParser in self.aoParsers:
            self.aidxFirstFunctions.append(iThreadedFunction);

            # Skip to the first function of the next parser. (Presumably
            # getThreadedFunctionByIndex returns a dummy past the end whose
            # source file matches no parser - TODO confirm.)
            while oThreadedFunction.oMcBlock.sSrcFile == oParser.sSrcFile:
                iThreadedFunction += 1;
                oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

        # Analyze the threaded functions and their variations for native recompilation.
        if fNativeRecompilerEnabled:
            ian.analyzeThreadedFunctionsForNativeRecomp(self.aoThreadedFuncs, sHostArch);

        # Gather arguments + variable statistics for the MC blocks.
        cMaxArgs = 0;
        cMaxVars = 0;
        cMaxVarsAndArgs = 0;
        cbMaxArgs = 0;
        cbMaxVars = 0;
        cbMaxVarsAndArgs = 0;
        for oThreadedFunction in self.aoThreadedFuncs:
            if oThreadedFunction.oMcBlock.aoLocals or oThreadedFunction.oMcBlock.aoArgs:
                # Counts.
                cMaxVars = max(cMaxVars, len(oThreadedFunction.oMcBlock.aoLocals));
                cMaxArgs = max(cMaxArgs, len(oThreadedFunction.oMcBlock.aoArgs));
                cMaxVarsAndArgs = max(cMaxVarsAndArgs,
                                      len(oThreadedFunction.oMcBlock.aoLocals) + len(oThreadedFunction.oMcBlock.aoArgs));
                if cMaxVarsAndArgs > 9:
                    raise Exception('%s potentially uses too many variables / args: %u, max 10 - %u vars and %u args'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cMaxVarsAndArgs,
                                       len(oThreadedFunction.oMcBlock.aoLocals), len(oThreadedFunction.oMcBlock.aoArgs),));
                # Calc stack allocation size:
                # Each variable/argument is rounded up to a whole number of 8 byte slots.
                cbArgs = 0;
                for oArg in oThreadedFunction.oMcBlock.aoArgs:
                    cbArgs += (getTypeBitCount(oArg.sType) + 63) // 64 * 8;
                cbVars = 0;
                for oVar in oThreadedFunction.oMcBlock.aoLocals:
                    cbVars += (getTypeBitCount(oVar.sType) + 63) // 64 * 8;
                cbMaxVars = max(cbMaxVars, cbVars);
                cbMaxArgs = max(cbMaxArgs, cbArgs);
                cbMaxVarsAndArgs = max(cbMaxVarsAndArgs, cbVars + cbArgs);
                if cbMaxVarsAndArgs >= 0xc0:
                    raise Exception('%s potentially uses too much stack: cbMaxVars=%#x cbMaxArgs=%#x'
                                    % (oThreadedFunction.oMcBlock.oFunction.sName, cbMaxVars, cbMaxArgs,));

        print('debug: max vars+args: %u bytes / %u; max vars: %u bytes / %u; max args: %u bytes / %u'
              % (cbMaxVarsAndArgs, cMaxVarsAndArgs, cbMaxVars, cMaxVars, cbMaxArgs, cMaxArgs,), file = sys.stderr);

        if self.cErrors > 0:
            print('fatal error: %u error%s during processing. Details above.'
                  % (self.cErrors, 's' if self.cErrors > 1 else '',), file = sys.stderr);
            return False;
        return True;
2767
2768 #
2769 # Output
2770 #
2771
2772 def generateLicenseHeader(self):
2773 """
2774 Returns the lines for a license header.
2775 """
2776 return [
2777 '/*',
2778 ' * Autogenerated by $Id: IEMAllThrdPython.py 104424 2024-04-24 14:49:27Z vboxsync $ ',
2779 ' * Do not edit!',
2780 ' */',
2781 '',
2782 '/*',
2783 ' * Copyright (C) 2023-' + str(datetime.date.today().year) + ' Oracle and/or its affiliates.',
2784 ' *',
2785 ' * This file is part of VirtualBox base platform packages, as',
2786 ' * available from https://www.virtualbox.org.',
2787 ' *',
2788 ' * This program is free software; you can redistribute it and/or',
2789 ' * modify it under the terms of the GNU General Public License',
2790 ' * as published by the Free Software Foundation, in version 3 of the',
2791 ' * License.',
2792 ' *',
2793 ' * This program is distributed in the hope that it will be useful, but',
2794 ' * WITHOUT ANY WARRANTY; without even the implied warranty of',
2795 ' * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU',
2796 ' * General Public License for more details.',
2797 ' *',
2798 ' * You should have received a copy of the GNU General Public License',
2799 ' * along with this program; if not, see <https://www.gnu.org/licenses>.',
2800 ' *',
2801 ' * The contents of this file may alternatively be used under the terms',
2802 ' * of the Common Development and Distribution License Version 1.0',
2803 ' * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included',
2804 ' * in the VirtualBox distribution, in which case the provisions of the',
2805 ' * CDDL are applicable instead of those of the GPL.',
2806 ' *',
2807 ' * You may elect to license modified versions of this file under the',
2808 ' * terms and conditions of either the GPL or the CDDL or both.',
2809 ' *',
2810 ' * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0',
2811 ' */',
2812 '',
2813 '',
2814 '',
2815 ];
2816
    ## List of built-in threaded functions with user argument counts and
    ## whether it has a native recompiler implementation.
    ## Each entry is a (sFuncNm, cUserArgs, fHaveNativeRecompiler) triplet.
    katBltIns = (
        ( 'Nop', 0, True ),
        ( 'LogCpuState', 0, True ),

        ( 'DeferToCImpl0', 2, True ),
        ( 'CheckIrq', 0, True ),
        ( 'CheckMode', 1, True ),
        ( 'CheckHwInstrBps', 0, False ),
        ( 'CheckCsLim', 1, True ),

        ( 'CheckCsLimAndOpcodes', 3, True ),
        ( 'CheckOpcodes', 3, True ),
        ( 'CheckOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodes', 3, True ),
        ( 'CheckPcAndOpcodesConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlb', 3, True ),
        ( 'CheckOpcodesAcrossPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlb', 3, True ),
        ( 'CheckOpcodesLoadingTlbConsiderCsLim', 3, True ),

        ( 'CheckCsLimAndOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNextPageLoadingTlbConsiderCsLim', 2, True ),

        ( 'CheckCsLimAndOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlb', 2, True ),
        ( 'CheckOpcodesOnNewPageLoadingTlbConsiderCsLim', 2, True ),
    );
2853
    def generateThreadedFunctionsHeader(self, oOut, _):
        """
        Generates the threaded functions header file.

        Emits the IEMTHREADEDFUNCS enumeration (built-ins first, then one
        entry per threaded function variation in emit order) and the function
        table / name array prototypes to oOut.  As a side effect the
        iEnumValue member of each emitted variation is assigned here.

        Returns success indicator.
        """

        asLines = self.generateLicenseHeader();

        # Generate the threaded function table indexes.
        asLines += [
            'typedef enum IEMTHREADEDFUNCS',
            '{',
            ' kIemThreadedFunc_Invalid = 0,',
            '',
            ' /*',
            ' * Predefined',
            ' */',
        ];
        asLines += [' kIemThreadedFunc_BltIn_%s,' % (sFuncNm,) for sFuncNm, _, _ in self.katBltIns];

        # Built-ins occupy indexes 1..len(katBltIns); variations follow.
        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            asLines += [
                '',
                ' /*',
                ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '',
                ' */',
            ];
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    oVariation.iEnumValue = iThreadedFunction;
                    asLines.append(' ' + oVariation.getIndexName() + ',');
        asLines += [
            ' kIemThreadedFunc_End',
            '} IEMTHREADEDFUNCS;',
            '',
        ];

        # Prototype the function table.
        asLines += [
            'extern const PFNIEMTHREADEDFUNC g_apfnIemThreadedFunctions[kIemThreadedFunc_End];',
            'extern uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End];',
            '#if defined(IN_RING3) || defined(LOG_ENABLED)',
            'extern const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End];',
            '#endif',
            '#if defined(IN_RING3)',
            'extern const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End];',
            '#endif',
        ];

        oOut.write('\n'.join(asLines));
        return True;
2908
    ## Maps a parameter bit width to the UINT64_C mask literal emitted when
    ## unpacking a packed threaded-function parameter (see
    ## generateFunctionParameterUnpacking).
    ksBitsToIntMask = {
        1: "UINT64_C(0x1)",
        2: "UINT64_C(0x3)",
        4: "UINT64_C(0xf)",
        8: "UINT64_C(0xff)",
        16: "UINT64_C(0xffff)",
        32: "UINT64_C(0xffffffff)",
    };
2917
2918 def generateFunctionParameterUnpacking(self, oVariation, oOut, asParams, uNoRefLevel = 0):
2919 """
2920 Outputs code for unpacking parameters.
2921 This is shared by the threaded and native code generators.
2922 """
2923 aasVars = [];
2924 for aoRefs in oVariation.dParamRefs.values():
2925 oRef = aoRefs[0];
2926 if oRef.sType[0] != 'P':
2927 cBits = g_kdTypeInfo[oRef.sType][0];
2928 sType = g_kdTypeInfo[oRef.sType][2];
2929 else:
2930 cBits = 64;
2931 sType = oRef.sType;
2932
2933 sTypeDecl = sType + ' const';
2934
2935 if cBits == 64:
2936 assert oRef.offNewParam == 0;
2937 if sType == 'uint64_t':
2938 sUnpack = '%s;' % (asParams[oRef.iNewParam],);
2939 else:
2940 sUnpack = '(%s)%s;' % (sType, asParams[oRef.iNewParam],);
2941 elif oRef.offNewParam == 0:
2942 sUnpack = '(%s)(%s & %s);' % (sType, asParams[oRef.iNewParam], self.ksBitsToIntMask[cBits]);
2943 else:
2944 sUnpack = '(%s)((%s >> %s) & %s);' \
2945 % (sType, asParams[oRef.iNewParam], oRef.offNewParam, self.ksBitsToIntMask[cBits]);
2946
2947 sComment = '/* %s - %s ref%s */' % (oRef.sOrgRef, len(aoRefs), 's' if len(aoRefs) != 1 else '',);
2948
2949 aasVars.append([ '%s:%02u' % (oRef.iNewParam, oRef.offNewParam),
2950 sTypeDecl, oRef.sNewName, sUnpack, sComment ]);
2951 acchVars = [0, 0, 0, 0, 0];
2952 for asVar in aasVars:
2953 for iCol, sStr in enumerate(asVar):
2954 acchVars[iCol] = max(acchVars[iCol], len(sStr));
2955 sFmt = ' %%-%ss %%-%ss = %%-%ss %%s\n' % (acchVars[1], acchVars[2], acchVars[3]);
2956 for asVar in sorted(aasVars):
2957 oOut.write(sFmt % (asVar[1], asVar[2], asVar[3], asVar[4],));
2958
2959 if uNoRefLevel > 0 and aasVars:
2960 if uNoRefLevel > 1:
2961 # level 2: Everything. This is used by liveness.
2962 oOut.write(' ');
2963 for asVar in sorted(aasVars):
2964 oOut.write(' RT_NOREF_PV(%s);' % (asVar[2],));
2965 oOut.write('\n');
2966 else:
2967 # level 1: Only pfnXxxx variables. This is used by native.
2968 for asVar in sorted(aasVars):
2969 if asVar[2].startswith('pfn'):
2970 oOut.write(' RT_NOREF_PV(%s);\n' % (asVar[2],));
2971 return True;
2972
    ## The names of the three generic 64-bit parameters of a threaded function.
    kasThreadedParamNames = ('uParam0', 'uParam1', 'uParam2');
    def generateThreadedFunctionsSource(self, oOut, _):
        """
        Generates the threaded functions source file.

        Emits one static IEM_DECL_IEMTHREADEDFUNC_DEF function per threaded
        function variation, followed by four parallel lookup tables indexed
        by IEMTHREADEDFUNCS (function pointers, argument counts, names and
        statistics names).

        oOut is the output stream (only .write is used); the second argument
        (part number) is unused as this output is not split into parts.
        Returns success indicator (True).
        """

        asLines = self.generateLicenseHeader();
        oOut.write('\n'.join(asLines));

        #
        # Emit the function definitions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            # Big box banner separating the variations in the generated file.
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMTHREADEDFUNC_DEF(' + oVariation.getThreadedFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut, self.kasThreadedParamNames);

                    # RT_NOREF for unused parameters.
                    if oVariation.cMinParams < g_kcThreadedParams:
                        oOut.write(' RT_NOREF(' + ', '.join(self.kasThreadedParamNames[oVariation.cMinParams:]) + ');\n');

                    # Now for the actual statements.
                    oOut.write(iai.McStmt.renderCodeForList(oVariation.aoStmtsForThreadedFunction, cchIndent = 4));

                    oOut.write('}\n');


        #
        # Generate the output tables in parallel.
        #
        # All four tables are indexed by IEMTHREADEDFUNCS, so they are built
        # side by side to guarantee the entries line up (see the iEnumValue
        # assertion below).
        #
        asFuncTable = [
            '/**',
            ' * Function pointer table.',
            ' */',
            'PFNIEMTHREADEDFUNC const g_apfnIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' /*Invalid*/ NULL,',
        ];
        asArgCntTab = [
            '/**',
            ' * Argument count table.',
            ' */',
            'uint8_t const g_acIemThreadedFunctionUsedArgs[kIemThreadedFunc_End] =',
            '{',
            ' 0, /*Invalid*/',
        ];
        asNameTable = [
            '/**',
            ' * Function name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctions[kIemThreadedFunc_End] =',
            '{',
            ' "Invalid",',
        ];
        asStatTable = [
            '/**',
            ' * Function statistics name table.',
            ' */',
            'const char * const g_apszIemThreadedFunctionStats[kIemThreadedFunc_End] =',
            '{',
            ' NULL,',
        ];
        aasTables = (asFuncTable, asArgCntTab, asNameTable, asStatTable,);

        # The predefined (built-in) functions come first, same as in the enum.
        for asTable in aasTables:
            asTable.extend((
                '',
                ' /*',
                ' * Predefined.',
                ' */',
            ));
        for sFuncNm, cArgs, _ in self.katBltIns:
            asFuncTable.append(' iemThreadedFunc_BltIn_%s,' % (sFuncNm,));
            asArgCntTab.append(' %d, /*BltIn_%s*/' % (cArgs, sFuncNm,));
            asNameTable.append(' "BltIn_%s",' % (sFuncNm,));
            asStatTable.append(' "BltIn/%s",' % (sFuncNm,));

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            for asTable in aasTables:
                asTable.extend((
                    '',
                    ' /*',
                    ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation],
                    ' */',
                ));
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Must match the enum value handed out by generateThreadedFunctionsHeader.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getThreadedFunctionName();
                    asFuncTable.append(' /*%4u*/ %s,' % (iThreadedFunction, sName,));
                    asNameTable.append(' /*%4u*/ "%s",' % (iThreadedFunction, sName,));
                    asArgCntTab.append(' /*%4u*/ %d, /*%s*/' % (iThreadedFunction, oVariation.cMinParams, sName,));
                    asStatTable.append(' "%s",' % (oVariation.getThreadedFunctionStatisticsName(),));

        for asTable in aasTables:
            asTable.append('};');

        #
        # Output the tables.
        #
        # Note: the name and statistics tables are only compiled in ring-3
        #       (and with logging enabled for the name table).
        oOut.write( '\n'
                  + '\n');
        oOut.write('\n'.join(asFuncTable));
        oOut.write( '\n'
                  + '\n'
                  + '\n');
        oOut.write('\n'.join(asArgCntTab));
        oOut.write( '\n'
                  + '\n'
                  + '#if defined(IN_RING3) || defined(LOG_ENABLED)\n');
        oOut.write('\n'.join(asNameTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 || LOG_ENABLED */\n'
                  + '\n'
                  + '\n'
                  + '#if defined(IN_RING3)\n');
        oOut.write('\n'.join(asStatTable));
        oOut.write( '\n'
                  + '#endif /* IN_RING3 */\n');

        return True;
3123
3124 def generateNativeFunctionsHeader(self, oOut, _):
3125 """
3126 Generates the native recompiler functions header file.
3127 Returns success indicator.
3128 """
3129 if not self.oOptions.fNativeRecompilerEnabled:
3130 return True;
3131
3132 asLines = self.generateLicenseHeader();
3133
3134 # Prototype the function table.
3135 asLines += [
3136 'extern const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End];',
3137 'extern const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End];',
3138 '',
3139 ];
3140
3141 # Emit indicators as to which of the builtin functions have a native
3142 # recompiler function and which not. (We only really need this for
3143 # kIemThreadedFunc_BltIn_CheckMode, but do all just for simplicity.)
3144 for atBltIn in self.katBltIns:
3145 if atBltIn[1]:
3146 asLines.append('#define IEMNATIVE_WITH_BLTIN_' + atBltIn[0].upper())
3147 else:
3148 asLines.append('#define IEMNATIVE_WITHOUT_BLTIN_' + atBltIn[0].upper())
3149
3150 # Emit prototypes for the builtin functions we use in tables.
3151 asLines += [
3152 '',
3153 '/* Prototypes for built-in functions used in the above tables. */',
3154 ];
3155 for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
3156 if fHaveRecompFunc:
3157 asLines += [
3158 'IEM_DECL_IEMNATIVERECOMPFUNC_PROTO( iemNativeRecompFunc_BltIn_%s);' % (sFuncNm,),
3159 'IEM_DECL_IEMNATIVELIVENESSFUNC_PROTO(iemNativeLivenessFunc_BltIn_%s);' % (sFuncNm,),
3160 ];
3161
3162 # Emit prototypes for table function.
3163 asLines += [
3164 '',
3165 '#ifdef IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES'
3166 ]
3167 for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
3168 sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
3169 asLines += [
3170 '',
3171 '/* Variation: ' + sVarName + ' */',
3172 ];
3173 for oThreadedFunction in self.aoThreadedFuncs:
3174 oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
3175 if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
3176 asLines.append('IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(' + oVariation.getNativeFunctionName() + ');');
3177 asLines += [
3178 '',
3179 '#endif /* IEMNATIVE_INCL_TABLE_FUNCTION_PROTOTYPES */',
3180 ]
3181
3182 oOut.write('\n'.join(asLines));
3183 return True;
3184
    def generateNativeFunctionsSource(self, oOut, idxPart):
        """
        Generates the native recompiler functions source file.

        The output is split into cParts (4) files; idxPart (0..3) selects
        which slice of the variation list goes into this one.  The function
        pointer table covering all parts is emitted into part 0 only.

        Does nothing (and succeeds) unless --native was given.
        Returns success indicator (True).
        """
        cParts = 4;
        assert(idxPart in range(cParts));
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        # The files are split up by threaded variation as that's the simplest way to
        # do it, even if the distribution isn't entirely even (ksVariation_Default
        # only has the defer to cimpl bits and the pre-386 variants will naturally
        # have fewer instructions).
        #
        cVariationsPerFile = len(ThreadedFunctionVariation.kasVariationsEmitOrder) // cParts;
        idxFirstVar = idxPart * cVariationsPerFile;
        idxEndVar = idxFirstVar + cVariationsPerFile;
        # The last part picks up any remainder from the integer division.
        if idxPart + 1 >= cParts:
            idxEndVar = len(ThreadedFunctionVariation.kasVariationsEmitOrder);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder[idxFirstVar:idxEndVar]:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'IEM_DECL_IEMNATIVERECOMPFUNC_DEF(' + oVariation.getNativeFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 1);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table if this is the first file.
        #
        if idxPart == 0:
            oOut.write( '\n'
                       + '\n'
                       + '/*\n'
                       + ' * Function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                       + ' */\n'
                       + 'const PFNIEMNATIVERECOMPFUNC g_apfnIemNativeRecompileFunctions[kIemThreadedFunc_End] =\n'
                       + '{\n'
                       + ' /*Invalid*/ NULL,'
                       + '\n'
                       + ' /*\n'
                       + ' * Predefined.\n'
                       + ' */\n'
                       );
            for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
                if fHaveRecompFunc:
                    oOut.write(' iemNativeRecompFunc_BltIn_%s,\n' % (sFuncNm,))
                else:
                    oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

            iThreadedFunction = 1 + len(self.katBltIns);
            for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
                oOut.write( ' /*\n'
                           + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                           + ' */\n');
                for oThreadedFunction in self.aoThreadedFuncs:
                    oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                    if oVariation:
                        iThreadedFunction += 1;
                        # Must match the enum value handed out by generateThreadedFunctionsHeader.
                        assert oVariation.iEnumValue == iThreadedFunction;
                        sName = oVariation.getNativeFunctionName();
                        if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                            oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                        else:
                            oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

            oOut.write( '};\n');

        oOut.write('\n');
        return True;
3295
    def generateNativeLivenessSource(self, oOut, _):
        """
        Generates the native recompiler liveness analysis functions source file.

        Emits one static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF function per
        recompilable variation (parameters referenced via RT_NOREF_PV,
        uNoRefLevel = 2), followed by the g_apfnIemNativeLivenessFunctions
        table running parallel to the other function tables.

        Does nothing (and succeeds) unless --native was given.
        Returns success indicator (True).
        """
        if not self.oOptions.fNativeRecompilerEnabled:
            return True;

        #
        # The file header.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));

        #
        # Emit the functions.
        #
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            sVarName = ThreadedFunctionVariation.kdVariationNames[sVariation];
            oOut.write( '\n'
                       + '\n'
                       + '\n'
                       + '\n'
                       + '/*' + '*' * 128 + '\n'
                       + '* Variation: ' + sVarName + ' ' * (129 - len(sVarName) - 15) + '*\n'
                       + '*' * 128 + '*/\n');

            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None) # type: ThreadedFunctionVariation
                if oVariation and oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                    oMcBlock = oThreadedFunction.oMcBlock;

                    # Function header
                    oOut.write( '\n'
                               + '\n'
                               + '/**\n'
                               + ' * #%u: %s at line %s offset %s in %s%s\n'
                                  % (oVariation.iEnumValue, oMcBlock.sFunction, oMcBlock.iBeginLine, oMcBlock.offBeginLine,
                                     os.path.split(oMcBlock.sSrcFile)[1],
                                     ' (macro expansion)' if oMcBlock.iBeginLine == oMcBlock.iEndLine else '')
                               + ' */\n'
                               + 'static IEM_DECL_IEMNATIVELIVENESSFUNC_DEF(' + oVariation.getLivenessFunctionName() + ')\n'
                               + '{\n');

                    # Unpack parameters.
                    self.generateFunctionParameterUnpacking(oVariation, oOut,
                                                            ('pCallEntry->auParams[0]',
                                                             'pCallEntry->auParams[1]',
                                                             'pCallEntry->auParams[2]',),
                                                            uNoRefLevel = 2);

                    # Now for the actual statements.
                    oOut.write(oVariation.oNativeRecomp.renderCode(cchIndent = 4));

                    oOut.write('}\n');

        #
        # Output the function table.
        #
        oOut.write( '\n'
                   + '\n'
                   + '/*\n'
                   + ' * Liveness analysis function table running parallel to g_apfnIemThreadedFunctions and friends.\n'
                   + ' */\n'
                   + 'const PFNIEMNATIVELIVENESSFUNC g_apfnIemNativeLivenessFunctions[kIemThreadedFunc_End] =\n'
                   + '{\n'
                   + ' /*Invalid*/ NULL,'
                   + '\n'
                   + ' /*\n'
                   + ' * Predefined.\n'
                   + ' */\n'
                   );
        for sFuncNm, _, fHaveRecompFunc in self.katBltIns:
            if fHaveRecompFunc:
                oOut.write(' iemNativeLivenessFunc_BltIn_%s,\n' % (sFuncNm,))
            else:
                oOut.write(' NULL, /*BltIn_%s*/\n' % (sFuncNm,))

        iThreadedFunction = 1 + len(self.katBltIns);
        for sVariation in ThreadedFunctionVariation.kasVariationsEmitOrder:
            oOut.write( ' /*\n'
                       + ' * Variation: ' + ThreadedFunctionVariation.kdVariationNames[sVariation] + '\n'
                       + ' */\n');
            for oThreadedFunction in self.aoThreadedFuncs:
                oVariation = oThreadedFunction.dVariations.get(sVariation, None);
                if oVariation:
                    iThreadedFunction += 1;
                    # Must match the enum value handed out by generateThreadedFunctionsHeader.
                    assert oVariation.iEnumValue == iThreadedFunction;
                    sName = oVariation.getLivenessFunctionName();
                    if oVariation.oNativeRecomp and oVariation.oNativeRecomp.isRecompilable():
                        oOut.write(' /*%4u*/ %s,\n' % (iThreadedFunction, sName,));
                    else:
                        oOut.write(' /*%4u*/ NULL /*%s*/,\n' % (iThreadedFunction, sName,));

        oOut.write( '};\n'
                  + '\n');
        return True;
3392
3393
3394 def getThreadedFunctionByIndex(self, idx):
3395 """
3396 Returns a ThreadedFunction object for the given index. If the index is
3397 out of bounds, a dummy is returned.
3398 """
3399 if idx < len(self.aoThreadedFuncs):
3400 return self.aoThreadedFuncs[idx];
3401 return ThreadedFunction.dummyInstance();
3402
    def generateModifiedInput(self, oOut, idxFile):
        """
        Generates the combined modified input source/header file.

        Copies the parsed input files belonging to file set idxFile (1..4,
        matched against iai.g_aaoAllInstrFilesAndDefaultMapAndSet) through to
        oOut, replacing each MC block with the code produced by
        generateInputCode() on the corresponding threaded function.

        Returns success indicator (True).
        """
        #
        # File header and assert assumptions.
        #
        oOut.write('\n'.join(self.generateLicenseHeader()));
        oOut.write('AssertCompile((IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK | IEM_F_MODE_CPUMODE_MASK) == 7);\n');

        #
        # Iterate all parsers (input files) and output the ones related to the
        # file set given by idxFile.
        #
        for idxParser, oParser in enumerate(self.aoParsers): # type: int, IEMAllInstPython.SimpleParser
            # Is this included in the file set?  (aoInfo[2] == -1 means the
            # file belongs to every set.)
            sSrcBaseFile = os.path.basename(oParser.sSrcFile).lower();
            fInclude = -1;
            for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet:
                if sSrcBaseFile == aoInfo[0].lower():
                    fInclude = aoInfo[2] in (-1, idxFile);
                    break;
            if fInclude is not True:
                assert fInclude is False;
                continue;

            # Output it.
            oOut.write("\n\n/* ****** BEGIN %s ******* */\n" % (oParser.sSrcFile,));

            # Walk the lines of the input, splicing in the threaded function
            # replacements for the MC blocks as we encounter them.
            iThreadedFunction = self.aidxFirstFunctions[idxParser];
            oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
            iLine = 0;
            while iLine < len(oParser.asLines):
                sLine = oParser.asLines[iLine];
                iLine += 1; # iBeginLine and iEndLine are 1-based.

                # Can we pass it thru?
                if ( iLine not in [oThreadedFunction.oMcBlock.iBeginLine, oThreadedFunction.oMcBlock.iEndLine]
                    or oThreadedFunction.oMcBlock.sSrcFile != oParser.sSrcFile):
                    oOut.write(sLine);
                #
                # Single MC block. Just extract it and insert the replacement.
                #
                elif oThreadedFunction.oMcBlock.iBeginLine != oThreadedFunction.oMcBlock.iEndLine:
                    assert ( (sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1)
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial), 'sLine="%s"' % (sLine,);
                    # Keep the text before the block, insert the replacement.
                    oOut.write(sLine[:oThreadedFunction.oMcBlock.offBeginLine]);
                    sModified = oThreadedFunction.generateInputCode().strip();
                    oOut.write(sModified);

                    # Skip ahead to the end line and keep the text after the block.
                    iLine = oThreadedFunction.oMcBlock.iEndLine;
                    sLine = oParser.asLines[iLine - 1];
                    assert ( sLine.count('IEM_MC_') - sLine.count('IEM_MC_F_') == 1
                            or len(oThreadedFunction.oMcBlock.aoStmts) == 1
                            or oThreadedFunction.oMcBlock.iMacroExp == iai.McBlock.kiMacroExp_Partial);
                    oOut.write(sLine[oThreadedFunction.oMcBlock.offAfterEnd : ]);

                    # Advance
                    iThreadedFunction += 1;
                    oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);
                #
                # Macro expansion line that have sublines and may contain multiple MC blocks.
                #
                else:
                    offLine = 0;
                    while iLine == oThreadedFunction.oMcBlock.iBeginLine:
                        oOut.write(sLine[offLine : oThreadedFunction.oMcBlock.offBeginLine]);

                        sModified = oThreadedFunction.generateInputCode().strip();
                        assert ( sModified.startswith('IEM_MC_BEGIN')
                                or (sModified.find('IEM_MC_DEFER_TO_CIMPL_') > 0 and sModified.strip().startswith('{\n'))
                                or sModified.startswith('pVCpu->iem.s.fEndTb = true')
                                or sModified.startswith('pVCpu->iem.s.fTbCurInstr = ')
                                ), 'sModified="%s"' % (sModified,);
                        oOut.write(sModified);

                        offLine = oThreadedFunction.oMcBlock.offAfterEnd;

                        # Advance
                        iThreadedFunction += 1;
                        oThreadedFunction = self.getThreadedFunctionByIndex(iThreadedFunction);

                    # Last line segment.
                    if offLine < len(sLine):
                        oOut.write(sLine[offLine : ]);

            oOut.write("/* ****** END %s ******* */\n" % (oParser.sSrcFile,));

        return True;
3493
3494
3495 #
3496 # Main
3497 #
3498
3499 def main(self, asArgs):
3500 """
3501 C-like main function.
3502 Returns exit code.
3503 """
3504
3505 #
3506 # Parse arguments
3507 #
3508 sScriptDir = os.path.dirname(__file__);
3509 oParser = argparse.ArgumentParser(add_help = False);
3510 oParser.add_argument('asInFiles',
3511 metavar = 'input.cpp.h',
3512 nargs = '*',
3513 default = [os.path.join(sScriptDir, aoInfo[0])
3514 for aoInfo in iai.g_aaoAllInstrFilesAndDefaultMapAndSet],
3515 help = "Selection of VMMAll/IEMAllInst*.cpp.h files to use as input.");
3516 oParser.add_argument('--host-arch',
3517 metavar = 'arch',
3518 dest = 'sHostArch',
3519 action = 'store',
3520 default = None,
3521 help = 'The host architecture.');
3522
3523 oParser.add_argument('--out-thrd-funcs-hdr',
3524 metavar = 'file-thrd-funcs.h',
3525 dest = 'sOutFileThrdFuncsHdr',
3526 action = 'store',
3527 default = '-',
3528 help = 'The output header file for the threaded functions.');
3529 oParser.add_argument('--out-thrd-funcs-cpp',
3530 metavar = 'file-thrd-funcs.cpp',
3531 dest = 'sOutFileThrdFuncsCpp',
3532 action = 'store',
3533 default = '-',
3534 help = 'The output C++ file for the threaded functions.');
3535 oParser.add_argument('--out-n8ve-funcs-hdr',
3536 metavar = 'file-n8tv-funcs.h',
3537 dest = 'sOutFileN8veFuncsHdr',
3538 action = 'store',
3539 default = '-',
3540 help = 'The output header file for the native recompiler functions.');
3541 oParser.add_argument('--out-n8ve-funcs-cpp1',
3542 metavar = 'file-n8tv-funcs1.cpp',
3543 dest = 'sOutFileN8veFuncsCpp1',
3544 action = 'store',
3545 default = '-',
3546 help = 'The output C++ file for the native recompiler functions part 1.');
3547 oParser.add_argument('--out-n8ve-funcs-cpp2',
3548 metavar = 'file-n8ve-funcs2.cpp',
3549 dest = 'sOutFileN8veFuncsCpp2',
3550 action = 'store',
3551 default = '-',
3552 help = 'The output C++ file for the native recompiler functions part 2.');
3553 oParser.add_argument('--out-n8ve-funcs-cpp3',
3554 metavar = 'file-n8ve-funcs3.cpp',
3555 dest = 'sOutFileN8veFuncsCpp3',
3556 action = 'store',
3557 default = '-',
3558 help = 'The output C++ file for the native recompiler functions part 3.');
3559 oParser.add_argument('--out-n8ve-funcs-cpp4',
3560 metavar = 'file-n8ve-funcs4.cpp',
3561 dest = 'sOutFileN8veFuncsCpp4',
3562 action = 'store',
3563 default = '-',
3564 help = 'The output C++ file for the native recompiler functions part 4.');
3565 oParser.add_argument('--out-n8ve-liveness-cpp',
3566 metavar = 'file-n8ve-liveness.cpp',
3567 dest = 'sOutFileN8veLivenessCpp',
3568 action = 'store',
3569 default = '-',
3570 help = 'The output C++ file for the native recompiler liveness analysis functions.');
3571 oParser.add_argument('--native',
3572 dest = 'fNativeRecompilerEnabled',
3573 action = 'store_true',
3574 default = False,
3575 help = 'Enables generating the files related to native recompilation.');
3576 oParser.add_argument('--out-mod-input1',
3577 metavar = 'file-instr.cpp.h',
3578 dest = 'sOutFileModInput1',
3579 action = 'store',
3580 default = '-',
3581 help = 'The output C++/header file for modified input instruction files part 1.');
3582 oParser.add_argument('--out-mod-input2',
3583 metavar = 'file-instr.cpp.h',
3584 dest = 'sOutFileModInput2',
3585 action = 'store',
3586 default = '-',
3587 help = 'The output C++/header file for modified input instruction files part 2.');
3588 oParser.add_argument('--out-mod-input3',
3589 metavar = 'file-instr.cpp.h',
3590 dest = 'sOutFileModInput3',
3591 action = 'store',
3592 default = '-',
3593 help = 'The output C++/header file for modified input instruction files part 3.');
3594 oParser.add_argument('--out-mod-input4',
3595 metavar = 'file-instr.cpp.h',
3596 dest = 'sOutFileModInput4',
3597 action = 'store',
3598 default = '-',
3599 help = 'The output C++/header file for modified input instruction files part 4.');
3600 oParser.add_argument('--help', '-h', '-?',
3601 action = 'help',
3602 help = 'Display help and exit.');
3603 oParser.add_argument('--version', '-V',
3604 action = 'version',
3605 version = 'r%s (IEMAllThreadedPython.py), r%s (IEMAllInstPython.py)'
3606 % (__version__.split()[1], iai.__version__.split()[1],),
3607 help = 'Displays the version/revision of the script and exit.');
3608 self.oOptions = oParser.parse_args(asArgs[1:]);
3609 print("oOptions=%s" % (self.oOptions,), file = sys.stderr);
3610
3611 if self.oOptions.sHostArch not in ('amd64', 'arm64'):
3612 print('error! Unsupported (or missing) host architecture: %s' % (self.oOptions.sHostArch,), file = sys.stderr);
3613 return 1;
3614
3615 #
3616 # Process the instructions specified in the IEM sources.
3617 #
3618 if self.processInputFiles(self.oOptions.sHostArch, self.oOptions.fNativeRecompilerEnabled):
3619 #
3620 # Generate the output files.
3621 #
3622 aaoOutputFiles = (
3623 ( self.oOptions.sOutFileThrdFuncsHdr, self.generateThreadedFunctionsHeader, 0, ),
3624 ( self.oOptions.sOutFileThrdFuncsCpp, self.generateThreadedFunctionsSource, 0, ),
3625 ( self.oOptions.sOutFileN8veFuncsHdr, self.generateNativeFunctionsHeader, 0, ),
3626 ( self.oOptions.sOutFileN8veFuncsCpp1, self.generateNativeFunctionsSource, 0, ),
3627 ( self.oOptions.sOutFileN8veFuncsCpp2, self.generateNativeFunctionsSource, 1, ),
3628 ( self.oOptions.sOutFileN8veFuncsCpp3, self.generateNativeFunctionsSource, 2, ),
3629 ( self.oOptions.sOutFileN8veFuncsCpp4, self.generateNativeFunctionsSource, 3, ),
3630 ( self.oOptions.sOutFileN8veLivenessCpp, self.generateNativeLivenessSource, 0, ),
3631 ( self.oOptions.sOutFileModInput1, self.generateModifiedInput, 1, ),
3632 ( self.oOptions.sOutFileModInput2, self.generateModifiedInput, 2, ),
3633 ( self.oOptions.sOutFileModInput3, self.generateModifiedInput, 3, ),
3634 ( self.oOptions.sOutFileModInput4, self.generateModifiedInput, 4, ),
3635 );
3636 fRc = True;
3637 for sOutFile, fnGenMethod, iPartNo in aaoOutputFiles:
3638 if sOutFile == '-':
3639 fRc = fnGenMethod(sys.stdout, iPartNo) and fRc;
3640 else:
3641 try:
3642 oOut = open(sOutFile, 'w'); # pylint: disable=consider-using-with,unspecified-encoding
3643 except Exception as oXcpt:
3644 print('error! Failed open "%s" for writing: %s' % (sOutFile, oXcpt,), file = sys.stderr);
3645 return 1;
3646 fRc = fnGenMethod(oOut, iPartNo) and fRc;
3647 oOut.close();
3648 if fRc:
3649 return 0;
3650
3651 return 1;
3652
3653
# Script entry point: run the generator on the command line arguments and
# propagate its exit code.
if __name__ == '__main__':
    sys.exit(IEMThreadedGenerator().main(sys.argv));
3656
Note: See TracBrowser for help on using the repository browser.

© 2023 Oracle
Contact · Privacy policy · Terms of Use