VirtualBox

source: vbox/trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp@94249

Last change on this file since 94249 was 94221, checked in by vboxsync, 3 years ago:

tstIEMAImpl: More shift test variations. bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 77.6 KB
/* $Id: tstIEMAImpl.cpp 94221 2022-03-14 12:57:25Z vboxsync $ */
/** @file
 * IEM Assembly Instruction Helper Testcase.
 */

/*
 * Copyright (C) 2022 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "../include/IEMInternal.h"

#include <iprt/errcore.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/ctype.h>
#include <iprt/initterm.h>
#include <iprt/message.h>
#include <iprt/mp.h>
#include <iprt/rand.h>
#include <iprt/stream.h>
#include <iprt/string.h>
#include <iprt/test.h>


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** @name 8-bit binary (PFNIEMAIMPLBINU8)
 * @{ */
typedef struct BINU8_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint8_t  uDstIn;
    uint8_t  uDstOut;
    uint8_t  uSrcIn;
    uint8_t  uMisc;
} BINU8_TEST_T;

typedef struct BINU8_T
{
    const char         *pszName;
    PFNIEMAIMPLBINU8    pfn;
    PFNIEMAIMPLBINU8    pfnNative;
    BINU8_TEST_T const *paTests;
    uint32_t            cTests;
    uint32_t            uExtra;
    uint8_t             idxCpuEflFlavour;
} BINU8_T;
/** @} */


/** @name 16-bit binary (PFNIEMAIMPLBINU16)
 * @{ */
typedef struct BINU16_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint16_t uDstIn;
    uint16_t uDstOut;
    uint16_t uSrcIn;
    uint16_t uMisc;
} BINU16_TEST_T;

typedef struct BINU16_T
{
    const char          *pszName;
    PFNIEMAIMPLBINU16    pfn;
    PFNIEMAIMPLBINU16    pfnNative;
    BINU16_TEST_T const *paTests;
    uint32_t             cTests;
    uint32_t             uExtra;
    uint8_t              idxCpuEflFlavour;
} BINU16_T;
/** @} */


/** @name 32-bit binary (PFNIEMAIMPLBINU32)
 * @{ */
typedef struct BINU32_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint32_t uDstIn;
    uint32_t uDstOut;
    uint32_t uSrcIn;
    uint32_t uMisc;
} BINU32_TEST_T;

typedef struct BINU32_T
{
    const char          *pszName;
    PFNIEMAIMPLBINU32    pfn;
    PFNIEMAIMPLBINU32    pfnNative;
    BINU32_TEST_T const *paTests;
    uint32_t             cTests;
    uint32_t             uExtra;
    uint8_t              idxCpuEflFlavour;
} BINU32_T;
/** @} */


/** @name 64-bit binary (PFNIEMAIMPLBINU64)
 * @{ */
typedef struct BINU64_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint64_t uDstIn;
    uint64_t uDstOut;
    uint64_t uSrcIn;
    uint64_t uMisc;
} BINU64_TEST_T;

typedef struct BINU64_T
{
    const char          *pszName;
    PFNIEMAIMPLBINU64    pfn;
    PFNIEMAIMPLBINU64    pfnNative;
    BINU64_TEST_T const *paTests;
    uint32_t             cTests;
    uint32_t             uExtra;
    uint8_t              idxCpuEflFlavour;
} BINU64_T;
/** @} */


/** @name mult/div (PFNIEMAIMPLBINU8, PFNIEMAIMPLBINU16, PFNIEMAIMPLBINU32, PFNIEMAIMPLBINU64)
 * @{ */
typedef struct MULDIVU8_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint16_t uDstIn;
    uint16_t uDstOut;
    uint8_t  uSrcIn;
    int32_t  rc;
} MULDIVU8_TEST_T;

typedef struct MULDIVU16_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint16_t uDst1In;
    uint16_t uDst1Out;
    uint16_t uDst2In;
    uint16_t uDst2Out;
    uint16_t uSrcIn;
    int32_t  rc;
} MULDIVU16_TEST_T;

typedef struct MULDIVU32_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint32_t uDst1In;
    uint32_t uDst1Out;
    uint32_t uDst2In;
    uint32_t uDst2Out;
    uint32_t uSrcIn;
    int32_t  rc;
} MULDIVU32_TEST_T;

typedef struct MULDIVU64_TEST_T
{
    uint32_t fEflIn;
    uint32_t fEflOut;
    uint64_t uDst1In;
    uint64_t uDst1Out;
    uint64_t uDst2In;
    uint64_t uDst2Out;
    uint64_t uSrcIn;
    int32_t  rc;
} MULDIVU64_TEST_T;
/** @} */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
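/*
 * Initializers for the sub-test tables.  ENTRY(add_u8) expands to roughly
 * the following row (illustrative sketch of the preprocessor output, not
 * the exact token sequence):
 *
 *     { "add_u8", iemAImpl_add_u8, NULL,
 *       g_aTests_add_u8, RT_ELEMENTS(g_aTests_add_u8),
 *       0, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE }
 *
 * The _INTEL/_AMD variants put the vendor specific worker and test data in
 * the first slots and keep the generic worker in the pfnNative slot so the
 * test driver can cross-check it on matching hosts.
 */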
#define ENTRY(a_Name)       ENTRY_EX(a_Name, 0)
#define ENTRY_EX(a_Name, a_uExtra) \
    { #a_Name, iemAImpl_ ## a_Name, NULL, \
      g_aTests_ ## a_Name, RT_ELEMENTS(g_aTests_ ## a_Name), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }

#define ENTRY_INTEL(a_Name, a_fEflUndef) ENTRY_INTEL_EX(a_Name, a_fEflUndef, 0)
#define ENTRY_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) \
    { #a_Name "_intel", iemAImpl_ ## a_Name ## _intel, iemAImpl_ ## a_Name, \
      g_aTests_ ## a_Name ## _intel, RT_ELEMENTS(g_aTests_ ## a_Name ## _intel), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_INTEL }

#define ENTRY_AMD(a_Name, a_fEflUndef) ENTRY_AMD_EX(a_Name, a_fEflUndef, 0)
#define ENTRY_AMD_EX(a_Name, a_fEflUndef, a_uExtra) \
    { #a_Name "_amd", iemAImpl_ ## a_Name ## _amd, iemAImpl_ ## a_Name, \
      g_aTests_ ## a_Name ## _amd, RT_ELEMENTS(g_aTests_ ## a_Name ## _amd), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_AMD }


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
static RTTEST       g_hTest;
static uint8_t      g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
#ifdef TSTIEMAIMPL_WITH_GENERATOR
static uint32_t     g_cZeroDstTests = 2;
static uint32_t     g_cZeroSrcTests = 4;
#endif
static uint8_t     *g_pu8,   *g_pu8Two;
static uint16_t    *g_pu16,  *g_pu16Two;
static uint32_t    *g_pu32,  *g_pu32Two, *g_pfEfl;
static uint64_t    *g_pu64,  *g_pu64Two;
static RTUINT128U  *g_pu128, *g_pu128Two;


#include "tstIEMAImplData.h"
#include "tstIEMAImplData-Intel.h"
#include "tstIEMAImplData-Amd.h"


/*
 * Random helpers.
 */

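/**
 * Produces a random EFLAGS value suitable as test input: only the bits in
 * X86_EFL_LIVE_MASK are allowed to vary, and the reserved always-one bit
 * (X86_EFL_RA1_MASK, bit 1) is forced set so the value is architecturally
 * well-formed.
 */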
static uint32_t RandEFlags(void)
{
    uint32_t fEfl = RTRandU32();
    return (fEfl & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK;
}


static uint8_t RandU8(void)
{
    return RTRandU32Ex(0, 0xff);
}


static uint16_t RandU16(void)
{
    return RTRandU32Ex(0, 0xffff);
}


static uint32_t RandU32(void)
{
    return RTRandU32();
}


static uint64_t RandU64(void)
{
    return RTRandU64();
}


static RTUINT128U RandU128(void)
{
    RTUINT128U Ret;
    Ret.s.Hi = RTRandU64();
    Ret.s.Lo = RTRandU64();
    return Ret;
}

#ifdef TSTIEMAIMPL_WITH_GENERATOR

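/*
 * The RandUxxDst/RandUxxSrc helpers below seed the first g_cZeroDstTests /
 * g_cZeroSrcTests generated tests with zero operands, so the all-zero edge
 * cases are always covered before switching to purely random values.
 */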
static uint8_t RandU8Dst(uint32_t iTest)
{
    if (iTest < g_cZeroDstTests)
        return 0;
    return RandU8();
}


static uint8_t RandU8Src(uint32_t iTest)
{
    if (iTest < g_cZeroSrcTests)
        return 0;
    return RandU8();
}


static uint16_t RandU16Dst(uint32_t iTest)
{
    if (iTest < g_cZeroDstTests)
        return 0;
    return RandU16();
}


static uint16_t RandU16Src(uint32_t iTest)
{
    if (iTest < g_cZeroSrcTests)
        return 0;
    return RandU16();
}


static uint32_t RandU32Dst(uint32_t iTest)
{
    if (iTest < g_cZeroDstTests)
        return 0;
    return RandU32();
}


static uint32_t RandU32Src(uint32_t iTest)
{
    if (iTest < g_cZeroSrcTests)
        return 0;
    return RandU32();
}


static uint64_t RandU64Dst(uint32_t iTest)
{
    if (iTest < g_cZeroDstTests)
        return 0;
    return RandU64();
}


static uint64_t RandU64Src(uint32_t iTest)
{
    if (iTest < g_cZeroSrcTests)
        return 0;
    return RandU64();
}


static void GenerateHeader(PRTSTREAM pOut, const char *pszCpuDesc, const char *pszCpuType, const char *pszCpuSuffU)
{
    /* We want to tag the generated source code with the revision that produced it. */
    static char s_szRev[] = "$Revision: 94221 $";
    const char *pszRev = RTStrStripL(strchr(s_szRev, ':') + 1);
    size_t      cchRev = 0;
    while (RT_C_IS_DIGIT(pszRev[cchRev]))
        cchRev++;

    RTStrmPrintf(pOut,
                 "/* $Id: tstIEMAImpl.cpp 94221 2022-03-14 12:57:25Z vboxsync $ */\n"
                 "/** @file\n"
                 " * IEM Assembly Instruction Helper Testcase Data%s%s - r%.*s on %s.\n"
                 " */\n"
                 "\n"
                 "/*\n"
                 " * Copyright (C) 2022 Oracle Corporation\n"
                 " *\n"
                 " * This file is part of VirtualBox Open Source Edition (OSE), as\n"
                 " * available from http://www.virtualbox.org. This file is free software;\n"
                 " * you can redistribute it and/or modify it under the terms of the GNU\n"
                 " * General Public License (GPL) as published by the Free Software\n"
                 " * Foundation, in version 2 as it comes in the \"COPYING\" file of the\n"
                 " * VirtualBox OSE distribution. VirtualBox OSE is distributed in the\n"
                 " * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.\n"
                 " */\n"
                 "\n"
                 "#ifndef VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s_h\n"
                 "#define VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s_h\n"
                 "#ifndef RT_WITHOUT_PRAGMA_ONCE\n"
                 "# pragma once\n"
                 "#endif\n"
                 ,
                 pszCpuType ? " " : "", pszCpuType ? pszCpuType : "", cchRev, pszRev, pszCpuDesc,
                 pszCpuSuffU,
                 pszCpuSuffU);
}


static RTEXITCODE GenerateFooterAndClose(PRTSTREAM pOut, const char *pszCpuType, const char *pszCpuSuff, RTEXITCODE rcExit)
{
    RTStrmPrintf(pOut,
                 "\n"
                 "#endif /* !VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s_h */\n", pszCpuSuff);
    int rc = RTStrmClose(pOut);
    if (RT_SUCCESS(rc))
        return rcExit;
    return RTMsgErrorExitFailure("RTStrmClose failed on tstIEMAImplData%s%s.h: %Rrc",
                                 pszCpuType ? "-" : "", pszCpuType ? pszCpuType : "", rc);
}

#endif


/*
 * Test helpers.
 */
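/**
 * Formats the difference between two EFLAGS values for failure messages.
 *
 * Returns an empty string when they match, otherwise a string like
 * " - 0x41/CF/!ZF" (illustrative): the XOR mask followed by one "/NAME" or
 * "/!NAME" item per differing flag, where '!' means the flag is clear in the
 * actual value.  Note the static buffer: the result is only valid until the
 * next call.
 */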
static const char *EFlagsDiff(uint32_t fActual, uint32_t fExpected)
{
    if (fActual == fExpected)
        return "";

    uint32_t const fXor = fActual ^ fExpected;
    static char    s_szBuf[256];
    size_t cch = RTStrPrintf(s_szBuf, sizeof(s_szBuf), " - %#x", fXor);

    static struct
    {
        const char *pszName;
        uint32_t    fFlag;
    } const s_aFlags[] =
    {
#define EFL_ENTRY(a_Flags) { #a_Flags, X86_EFL_ ## a_Flags }
        EFL_ENTRY(CF),
        EFL_ENTRY(PF),
        EFL_ENTRY(AF),
        EFL_ENTRY(ZF),
        EFL_ENTRY(SF),
        EFL_ENTRY(TF),
        EFL_ENTRY(IF),
        EFL_ENTRY(DF),
        EFL_ENTRY(OF),
        EFL_ENTRY(IOPL),
        EFL_ENTRY(NT),
        EFL_ENTRY(RF),
        EFL_ENTRY(VM),
        EFL_ENTRY(AC),
        EFL_ENTRY(VIF),
        EFL_ENTRY(VIP),
        EFL_ENTRY(ID),
    };
    for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
        if (s_aFlags[i].fFlag & fXor)
            cch += RTStrPrintf(&s_szBuf[cch], sizeof(s_szBuf) - cch,
                               s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
    RTStrPrintf(&s_szBuf[cch], sizeof(s_szBuf) - cch, "");
    return s_szBuf;
}


/*
 * Binary operations.
 */
#ifdef TSTIEMAIMPL_WITH_GENERATOR
# define GEN_BINARY_TESTS(a_cBits, a_Fmt) \
static void BinU ## a_cBits ## Generate(PRTSTREAM pOut, PRTSTREAM pOutCpu, const char *pszCpuSuffU, uint32_t cTests) \
{ \
    RTStrmPrintf(pOut, "\n\n#define HAVE_BINU%u_TESTS\n", a_cBits); \
    RTStrmPrintf(pOutCpu, "\n\n#define HAVE_BINU%u_TESTS%s\n", a_cBits, pszCpuSuffU); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aBinU ## a_cBits); iFn++) \
    { \
        PFNIEMAIMPLBINU ## a_cBits const pfn = g_aBinU ## a_cBits[iFn].pfnNative \
                                             ? g_aBinU ## a_cBits[iFn].pfnNative : g_aBinU ## a_cBits[iFn].pfn; \
        PRTSTREAM pOutFn = pOut; \
        if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE) \
        { \
            if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
                continue; \
            pOutFn = pOutCpu; \
        } \
        \
        RTStrmPrintf(pOutFn, "static const BINU%u_TEST_T g_aTests_%s[] =\n{\n", a_cBits, g_aBinU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn  = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn  = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn  = RandU ## a_cBits ## Src(iTest); \
            if (g_aBinU ## a_cBits[iFn].uExtra) \
                Test.uSrcIn &= a_cBits - 1; /* Restrict bit index according to operand width */ \
            Test.uMisc   = 0; \
            pfn(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOutFn, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %#x }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOutFn, "};\n"); \
    } \
}
#else
# define GEN_BINARY_TESTS(a_cBits, a_Fmt)
#endif

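/*
 * TEST_BINARY_OPS instantiates both the generator (when enabled) and the
 * test driver for one operand width.  The driver runs every recorded test
 * twice: once through a plain stack variable, and on success once more
 * through the specially allocated g_puXX/g_pfEfl pointers.  When the table
 * entry matches the host EFLAGS flavour and has a pfnNative worker, the
 * whole run is repeated against that worker (the "/n" tag in failures).
 */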
#define TEST_BINARY_OPS(a_cBits, a_uType, a_Fmt, a_aSubTests) \
GEN_BINARY_TESTS(a_cBits, a_Fmt) \
\
static void BinU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const                           cTests  = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLBINU ## a_cBits               pfn     = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_uType  uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uSrcIn, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut) \
                    RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s - %s\n", \
                                 iTest, !iVar ? "" : "/n", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut), \
                                 uDst == paTests[iTest].uDstOut ? "eflags" : fEfl == paTests[iTest].fEflOut ? "dst" : "both"); \
                else \
                { \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl         = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}


/*
 * 8-bit binary operations.
 */

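/*
 * When no generated data is available (HAVE_BINUxx_TESTS undefined), the
 * tables below provide single all-zero placeholder entries so the testcase
 * still compiles and links; the real tables come from tstIEMAImplData*.h.
 */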
#ifndef HAVE_BINU8_TESTS
static const BINU8_TEST_T g_aTests_add_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_add_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_adc_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_adc_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_sub_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_sub_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_sbb_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_sbb_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_or_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_or_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_xor_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_xor_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_and_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_and_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_cmp_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_test_u8[] = { {0} };
#endif

static const BINU8_T g_aBinU8[] =
{
    ENTRY(add_u8),
    ENTRY(add_u8_locked),
    ENTRY(adc_u8),
    ENTRY(adc_u8_locked),
    ENTRY(sub_u8),
    ENTRY(sub_u8_locked),
    ENTRY(sbb_u8),
    ENTRY(sbb_u8_locked),
    ENTRY(or_u8),
    ENTRY(or_u8_locked),
    ENTRY(xor_u8),
    ENTRY(xor_u8_locked),
    ENTRY(and_u8),
    ENTRY(and_u8_locked),
    ENTRY(cmp_u8),
    ENTRY(test_u8),
};

TEST_BINARY_OPS(8, uint8_t, "%#04x", g_aBinU8)


/*
 * 16-bit binary operations.
 */

#ifndef HAVE_BINU16_TESTS
static const BINU16_TEST_T g_aTests_add_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_add_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_cmp_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_test_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bt_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_arpl[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_AMD
static const BINU16_TEST_T g_aTests_bsf_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_amd[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_INTEL
static const BINU16_TEST_T g_aTests_bsf_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_intel[] = { {0} };
#endif

static const BINU16_T g_aBinU16[] =
{
    ENTRY(add_u16),
    ENTRY(add_u16_locked),
    ENTRY(adc_u16),
    ENTRY(adc_u16_locked),
    ENTRY(sub_u16),
    ENTRY(sub_u16_locked),
    ENTRY(sbb_u16),
    ENTRY(sbb_u16_locked),
    ENTRY(or_u16),
    ENTRY(or_u16_locked),
    ENTRY(xor_u16),
    ENTRY(xor_u16_locked),
    ENTRY(and_u16),
    ENTRY(and_u16_locked),
    ENTRY(cmp_u16),
    ENTRY(test_u16),
    ENTRY_EX(bt_u16, 1),
    ENTRY_EX(btc_u16, 1),
    ENTRY_EX(btc_u16_locked, 1),
    ENTRY_EX(btr_u16, 1),
    ENTRY_EX(btr_u16_locked, 1),
    ENTRY_EX(bts_u16, 1),
    ENTRY_EX(bts_u16_locked, 1),
    ENTRY_AMD(  bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY(arpl),
};

TEST_BINARY_OPS(16, uint16_t, "%#06x", g_aBinU16)


/*
 * 32-bit binary operations.
 */

#ifndef HAVE_BINU32_TESTS
static const BINU32_TEST_T g_aTests_add_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_add_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_cmp_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_test_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bt_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32_locked[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_AMD
static const BINU32_TEST_T g_aTests_bsf_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_amd[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_INTEL
static const BINU32_TEST_T g_aTests_bsf_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_intel[] = { {0} };
#endif

static const BINU32_T g_aBinU32[] =
{
    ENTRY(add_u32),
    ENTRY(add_u32_locked),
    ENTRY(adc_u32),
    ENTRY(adc_u32_locked),
    ENTRY(sub_u32),
    ENTRY(sub_u32_locked),
    ENTRY(sbb_u32),
    ENTRY(sbb_u32_locked),
    ENTRY(or_u32),
    ENTRY(or_u32_locked),
    ENTRY(xor_u32),
    ENTRY(xor_u32_locked),
    ENTRY(and_u32),
    ENTRY(and_u32_locked),
    ENTRY(cmp_u32),
    ENTRY(test_u32),
    ENTRY_EX(bt_u32, 1),
    ENTRY_EX(btc_u32, 1),
    ENTRY_EX(btc_u32_locked, 1),
    ENTRY_EX(btr_u32, 1),
    ENTRY_EX(btr_u32_locked, 1),
    ENTRY_EX(bts_u32, 1),
    ENTRY_EX(bts_u32_locked, 1),
    ENTRY_AMD(  bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

TEST_BINARY_OPS(32, uint32_t, "%#010RX32", g_aBinU32)


/*
 * 64-bit binary operations.
 */

#ifndef HAVE_BINU64_TESTS
static const BINU64_TEST_T g_aTests_add_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_add_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_cmp_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_test_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bt_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64_locked[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_AMD
static const BINU64_TEST_T g_aTests_bsf_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_amd[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_INTEL
static const BINU64_TEST_T g_aTests_bsf_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_intel[] = { {0} };
#endif

static const BINU64_T g_aBinU64[] =
{
    ENTRY(add_u64),
    ENTRY(add_u64_locked),
    ENTRY(adc_u64),
    ENTRY(adc_u64_locked),
    ENTRY(sub_u64),
    ENTRY(sub_u64_locked),
    ENTRY(sbb_u64),
    ENTRY(sbb_u64_locked),
    ENTRY(or_u64),
    ENTRY(or_u64_locked),
    ENTRY(xor_u64),
    ENTRY(xor_u64_locked),
    ENTRY(and_u64),
    ENTRY(and_u64_locked),
    ENTRY(cmp_u64),
    ENTRY(test_u64),
    ENTRY_EX(bt_u64, 1),
    ENTRY_EX(btc_u64, 1),
    ENTRY_EX(btc_u64_locked, 1),
    ENTRY_EX(btr_u64, 1),
    ENTRY_EX(btr_u64_locked, 1),
    ENTRY_EX(bts_u64, 1),
    ENTRY_EX(bts_u64_locked, 1),
    ENTRY_AMD(  bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

TEST_BINARY_OPS(64, uint64_t, "%#018RX64", g_aBinU64)


/*
 * XCHG
 */
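/**
 * Tests the locked and unlocked xchg workers at each operand width with a
 * pair of random values, verifying that the memory and register operands
 * end up swapped.
 */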
static void XchgTest(void)
{
    RTTestSub(g_hTest, "xchg");
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU64,(uint64_t *pu64Mem, uint64_t *pu64Reg));

    static struct
    {
        uint8_t cb; uint64_t fMask;
        union
        {
            uintptr_t           pfn;
            FNIEMAIMPLXCHGU8   *pfnU8;
            FNIEMAIMPLXCHGU16  *pfnU16;
            FNIEMAIMPLXCHGU32  *pfnU32;
            FNIEMAIMPLXCHGU64  *pfnU64;
        } u;
    }
    s_aXchgWorkers[] =
    {
        { 1, UINT8_MAX,  { (uintptr_t)iemAImpl_xchg_u8_locked  } },
        { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_locked } },
        { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_locked } },
        { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_locked } },
        { 1, UINT8_MAX,  { (uintptr_t)iemAImpl_xchg_u8_unlocked  } },
        { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_unlocked } },
        { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_unlocked } },
        { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_unlocked } },
    };
    for (size_t i = 0; i < RT_ELEMENTS(s_aXchgWorkers); i++)
    {
        RTUINT64U uIn1, uIn2, uMem, uDst;
        uMem.u = uIn1.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
        uDst.u = uIn2.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
        if (uIn1.u == uIn2.u)
            uDst.u = uIn2.u = ~uIn2.u;

        switch (s_aXchgWorkers[i].cb)
        {
            case 1:
                s_aXchgWorkers[i].u.pfnU8(g_pu8, g_pu8Two);
                s_aXchgWorkers[i].u.pfnU8(&uMem.au8[0], &uDst.au8[0]);
                break;
            case 2:
                s_aXchgWorkers[i].u.pfnU16(g_pu16, g_pu16Two);
                s_aXchgWorkers[i].u.pfnU16(&uMem.Words.w0, &uDst.Words.w0);
                break;
            case 4:
                s_aXchgWorkers[i].u.pfnU32(g_pu32, g_pu32Two);
                s_aXchgWorkers[i].u.pfnU32(&uMem.DWords.dw0, &uDst.DWords.dw0);
                break;
            case 8:
                s_aXchgWorkers[i].u.pfnU64(g_pu64, g_pu64Two);
                s_aXchgWorkers[i].u.pfnU64(&uMem.u, &uDst.u);
                break;
            default: RTTestFailed(g_hTest, "%d\n", s_aXchgWorkers[i].cb); break;
        }

        if (uMem.u != uIn2.u || uDst.u != uIn1.u)
            RTTestFailed(g_hTest, "i=%u: %#RX64, %#RX64 -> %#RX64, %#RX64\n", i, uIn1.u, uIn2.u, uMem.u, uDst.u);
    }
}


/*
 * XADD
 */
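/*
 * XADD is exercised against the ADD test data: the sum and resulting EFLAGS
 * must match the recorded add_uXX outputs, while the source operand must
 * receive the original destination value.
 */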
static void XaddTest(void)
{
#define TEST_XADD(a_cBits, a_Type, a_Fmt) do { \
        typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXADDU ## a_cBits, (a_Type *, a_Type *, uint32_t *)); \
        static struct \
        { \
            const char                       *pszName; \
            FNIEMAIMPLXADDU ## a_cBits       *pfn; \
            BINU ## a_cBits ## _TEST_T const *paTests; \
            uint32_t                          cTests; \
        } const s_aFuncs[] = \
        { \
            { "xadd_u" # a_cBits,           iemAImpl_xadd_u ## a_cBits, \
              g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
            { "xadd_u" # a_cBits "_locked", iemAImpl_xadd_u ## a_cBits ## _locked, \
              g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
        }; \
        for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
        { \
            RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
            BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
            uint32_t const                           cTests  = s_aFuncs[iFn].cTests; \
            for (uint32_t iTest = 0; iTest < cTests; iTest++) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type   uSrc = paTests[iTest].uSrcIn; \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uSrc, &fEfl); \
                if (   fEfl != paTests[iTest].fEflOut \
                    || *g_pu ## a_cBits != paTests[iTest].uDstOut \
                    || uSrc != paTests[iTest].uDstIn) \
                    RTTestFailed(g_hTest, "%s/#%u: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt " src=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                                 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
                                 fEfl, *g_pu ## a_cBits, uSrc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].uDstIn, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            } \
        } \
    } while (0)
    TEST_XADD(8, uint8_t, "%#04x");
    TEST_XADD(16, uint16_t, "%#06x");
    TEST_XADD(32, uint32_t, "%#010RX32");
    TEST_XADD(64, uint64_t, "%#018RX64");
}


/*
 * CMPXCHG
 */

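/*
 * CMPXCHG reuses the CMP test data.  Each record is played twice: first with
 * a (most likely) mismatching memory value, expecting the recorded cmp_uXX
 * flags, no store and the old memory value loaded into the accumulator, then
 * with matching values, expecting the flags of subtracting equal values
 * (computed via the sub_uXX worker) and the new value to be written.
 */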
static void CmpXchgTest(void)
{
#define TEST_CMPXCHG(a_cBits, a_Type, a_Fmt) do { \
        typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHGU ## a_cBits, (a_Type *, a_Type *, a_Type, uint32_t *)); \
        static struct \
        { \
            const char                       *pszName; \
            FNIEMAIMPLCMPXCHGU ## a_cBits    *pfn; \
            PFNIEMAIMPLBINU ## a_cBits        pfnSub; \
            BINU ## a_cBits ## _TEST_T const *paTests; \
            uint32_t                          cTests; \
        } const s_aFuncs[] = \
        { \
            { "cmpxchg_u" # a_cBits,           iemAImpl_cmpxchg_u ## a_cBits, iemAImpl_sub_u ## a_cBits, \
              g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
            { "cmpxchg_u" # a_cBits "_locked", iemAImpl_cmpxchg_u ## a_cBits ## _locked, iemAImpl_sub_u ## a_cBits, \
              g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
        }; \
        for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
        { \
            RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
            BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
            uint32_t const                           cTests  = s_aFuncs[iFn].cTests; \
            for (uint32_t iTest = 0; iTest < cTests; iTest++) \
            { \
                /* as is (99% likely to be negative). */ \
                uint32_t     fEfl    = paTests[iTest].fEflIn; \
                a_Type const uNew    = paTests[iTest].uSrcIn + 0x42; \
                a_Type       uA      = paTests[iTest].uDstIn; \
                *g_pu ## a_cBits     = paTests[iTest].uSrcIn; \
                a_Type const uExpect = uA != paTests[iTest].uSrcIn ? paTests[iTest].uSrcIn : uNew; \
                s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
                if (   fEfl != paTests[iTest].fEflOut \
                    || *g_pu ## a_cBits != uExpect \
                    || uA != paTests[iTest].uSrcIn) \
                    RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                                 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uSrcIn, paTests[iTest].uDstIn, \
                                 uNew, fEfl, *g_pu ## a_cBits, uA, paTests[iTest].fEflOut, uExpect, paTests[iTest].uSrcIn, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
                /* positive */ \
                uint32_t fEflExpect = paTests[iTest].fEflIn; \
                uA                  = paTests[iTest].uDstIn; \
                s_aFuncs[iFn].pfnSub(&uA, uA, &fEflExpect); \
                fEfl                = paTests[iTest].fEflIn; \
                uA                  = paTests[iTest].uDstIn; \
                *g_pu ## a_cBits    = uA; \
                s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
                if (   fEfl != fEflExpect \
                    || *g_pu ## a_cBits != uNew \
                    || uA != paTests[iTest].uDstIn) \
                    RTTestFailed(g_hTest, "%s/#%ub: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                                 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uDstIn, \
                                 uNew, fEfl, *g_pu ## a_cBits, uA, fEflExpect, uNew, paTests[iTest].uDstIn, \
                                 EFlagsDiff(fEfl, fEflExpect)); \
            } \
        } \
    } while (0)
    TEST_CMPXCHG(8, uint8_t, "%#04RX8");
    TEST_CMPXCHG(16, uint16_t, "%#06x");
    TEST_CMPXCHG(32, uint32_t, "%#010RX32");
#if ARCH_BITS != 32 /* calling convention issue, skipping as it's an unsupported host */
    TEST_CMPXCHG(64, uint64_t, "%#018RX64");
#endif
}

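/*
 * cmpxchg8b is tested with two random 64-bit values per iteration: a
 * positive round where memory matches the comparand (ZF must be set and the
 * new value stored) and a negative round against the inverted value (ZF must
 * be clear and the old memory content loaded back).
 */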
static void CmpXchg8bTest(void)
{
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
    static struct
    {
        const char          *pszName;
        FNIEMAIMPLCMPXCHG8B *pfn;
    } const s_aFuncs[] =
    {
        { "cmpxchg8b",        iemAImpl_cmpxchg8b },
        { "cmpxchg8b_locked", iemAImpl_cmpxchg8b_locked },
    };
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
    {
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
        for (uint32_t iTest = 0; iTest < 4; iTest += 2)
        {
            uint64_t const uOldValue = RandU64();
            uint64_t const uNewValue = RandU64();

            /* positive test. */
            RTUINT64U uA, uB;
            uB.u = uNewValue;
            uA.u = uOldValue;
            *g_pu64 = uOldValue;
            uint32_t fEflIn = RandEFlags();
            uint32_t fEfl   = fEflIn;
            s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
            if (   fEfl    != (fEflIn | X86_EFL_ZF)
                || *g_pu64 != uNewValue
                || uA.u    != uOldValue)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
                             iTest, fEflIn, uOldValue, uOldValue, uNewValue,
                             fEfl, *g_pu64, uA.u,
                             (fEflIn | X86_EFL_ZF), uNewValue, uOldValue, EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.u == uNewValue);

            /* negative */
            uint64_t const uExpect = ~uOldValue;
            *g_pu64 = uExpect;
            uA.u = uOldValue;
            uB.u = uNewValue;
            fEfl = fEflIn = RandEFlags();
            s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
            if (   fEfl    != (fEflIn & ~X86_EFL_ZF)
                || *g_pu64 != uExpect
                || uA.u    != uExpect)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
                             iTest + 1, fEflIn, uExpect, uOldValue, uNewValue,
                             fEfl, *g_pu64, uA.u,
                             (fEflIn & ~X86_EFL_ZF), uExpect, uExpect, EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.u == uNewValue);
        }
    }
}

static void CmpXchg16bTest(void)
{
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG16B,(PRTUINT128U, PRTUINT128U, PRTUINT128U, uint32_t *));
    static struct
    {
        const char           *pszName;
        FNIEMAIMPLCMPXCHG16B *pfn;
    } const s_aFuncs[] =
    {
        { "cmpxchg16b",          iemAImpl_cmpxchg16b },
        { "cmpxchg16b_locked",   iemAImpl_cmpxchg16b_locked },
#if !defined(RT_ARCH_ARM64)
        { "cmpxchg16b_fallback", iemAImpl_cmpxchg16b_fallback },
#endif
    };
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
    {
#if !defined(IEM_WITHOUT_ASSEMBLY) && defined(RT_ARCH_AMD64)
        if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16))
            continue;
#endif
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
        for (uint32_t iTest = 0; iTest < 4; iTest += 2)
        {
            RTUINT128U const uOldValue = RandU128();
            RTUINT128U const uNewValue = RandU128();

            /* positive test. */
            RTUINT128U uA, uB;
            uB = uNewValue;
            uA = uOldValue;
            *g_pu128 = uOldValue;
            uint32_t fEflIn = RandEFlags();
            uint32_t fEfl   = fEflIn;
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if (   fEfl          != (fEflIn | X86_EFL_ZF)
                || g_pu128->s.Lo != uNewValue.s.Lo
                || g_pu128->s.Hi != uNewValue.s.Hi
                || uA.s.Lo       != uOldValue.s.Lo
                || uA.s.Hi       != uOldValue.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest, fEflIn, uOldValue.s.Hi, uOldValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn | X86_EFL_ZF), uNewValue.s.Hi, uNewValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo,
                             EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);

            /* negative */
            RTUINT128U const uExpect = RTUINT128_INIT(~uOldValue.s.Hi, ~uOldValue.s.Lo);
            *g_pu128 = uExpect;
            uA = uOldValue;
            uB = uNewValue;
            fEfl = fEflIn = RandEFlags();
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if (   fEfl          != (fEflIn & ~X86_EFL_ZF)
                || g_pu128->s.Lo != uExpect.s.Lo
                || g_pu128->s.Hi != uExpect.s.Hi
                || uA.s.Lo       != uExpect.s.Lo
                || uA.s.Hi       != uExpect.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest + 1, fEflIn, uExpect.s.Hi, uExpect.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn & ~X86_EFL_ZF), uExpect.s.Hi, uExpect.s.Lo, uExpect.s.Hi, uExpect.s.Lo,
                             EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);
        }
    }
}


/*
 * Double shifts.
 *
 * Note! We use BINUxx_TEST_T with the shift value in the uMisc field.
 */
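/*
 * The generators below pick shift counts up to four times the operand width
 * (RandU8() & (a_cBits * 4 - 1)) so that out-of-range counts and the CPU's
 * count masking behaviour get exercised, not just in-range shifts.
 */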

#ifndef HAVE_SHIFT_DBL_TESTS_AMD
static const BINU16_TEST_T g_aTests_shrd_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64_amd[] = { {0} };
#endif
#ifndef HAVE_SHIFT_DBL_TESTS_INTEL
static const BINU16_TEST_T g_aTests_shrd_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64_intel[] = { {0} };
#endif

#ifdef TSTIEMAIMPL_WITH_GENERATOR
# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests) \
void ShiftDblU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn  = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn  = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn  = RandU ## a_cBits ## Src(iTest); \
            Test.uMisc   = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests)
#endif

#define TEST_SHIFT_DBL(a_cBits, a_Type, a_Fmt, a_aSubTests) \
static const struct \
{ \
    const char                       *pszName; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits   pfn; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits   pfnNative; \
    BINU ## a_cBits ## _TEST_T const *paTests; \
    uint32_t                          cTests, uExtra; \
    uint8_t                           idxCpuEflFlavour; \
} a_aSubTests[] = \
{ \
    ENTRY_AMD(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_INTEL(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_AMD(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_INTEL(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
}; \
\
GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests) \
\
static void ShiftDblU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const                           cTests  = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLSHIFTDBLU ## a_cBits          pfn     = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type   uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].uMisc, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut) \
                    RTTestFailed(g_hTest, "#%03u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " shift=%-2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s%s\n", \
                                 iTest, iVar == 0 ? "" : "/n", paTests[iTest].fEflIn, \
                                 paTests[iTest].uDstIn, paTests[iTest].uSrcIn, (unsigned)paTests[iTest].uMisc, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut), uDst == paTests[iTest].uDstOut ? "" : " dst!"); \
                else \
                { \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl         = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, paTests[iTest].uMisc, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
TEST_SHIFT_DBL(16, uint16_t, "%#06RX16", g_aShiftDblU16)
TEST_SHIFT_DBL(32, uint32_t, "%#010RX32", g_aShiftDblU32)
TEST_SHIFT_DBL(64, uint64_t, "%#018RX64", g_aShiftDblU64)

#ifdef TSTIEMAIMPL_WITH_GENERATOR
static void ShiftDblGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_DBL_TESTS%s\n", pszCpuSuffU);
    ShiftDblU16Generate(pOut, cTests);
    ShiftDblU32Generate(pOut, cTests);
    ShiftDblU64Generate(pOut, cTests);
}
#endif

static void ShiftDblTest(void)
{
    ShiftDblU16Test();
    ShiftDblU32Test();
    ShiftDblU64Test();
}


/*
 * Unary operators.
 *
 * Note! We use BINUxx_TEST_T, ignoring uSrcIn and uMisc.
 */
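/*
 * inc/dec/not/neg take no source operand, so the shared BINUxx_TEST_T
 * records are generated with uSrcIn and uMisc fixed to zero.
 */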

#ifndef HAVE_UNARY_TESTS
# define DUMMY_UNARY_TESTS(a_cBits, a_Type) \
    static const a_Type g_aTests_inc_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_inc_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_dec_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_dec_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_not_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_not_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_neg_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_neg_u ## a_cBits ## _locked[] = { {0} }
DUMMY_UNARY_TESTS(8, BINU8_TEST_T);
DUMMY_UNARY_TESTS(16, BINU16_TEST_T);
DUMMY_UNARY_TESTS(32, BINU32_TEST_T);
DUMMY_UNARY_TESTS(64, BINU64_TEST_T);
#endif

#define TEST_UNARY(a_cBits, a_Type, a_Fmt, a_TestType) \
static const struct \
{ \
    const char                   *pszName; \
    PFNIEMAIMPLUNARYU ## a_cBits  pfn; \
    PFNIEMAIMPLUNARYU ## a_cBits  pfnNative; \
    a_TestType const             *paTests; \
    uint32_t                      cTests, uExtra; \
    uint8_t                       idxCpuEflFlavour; \
} g_aUnaryU ## a_cBits [] = \
{ \
    ENTRY(inc_u ## a_cBits), \
    ENTRY(inc_u ## a_cBits ## _locked), \
    ENTRY(dec_u ## a_cBits), \
    ENTRY(dec_u ## a_cBits ## _locked), \
    ENTRY(not_u ## a_cBits), \
    ENTRY(not_u ## a_cBits ## _locked), \
    ENTRY(neg_u ## a_cBits), \
    ENTRY(neg_u ## a_cBits ## _locked), \
}; \
\
void UnaryU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aUnaryU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn  = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn  = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn  = 0; \
            Test.uMisc   = 0; \
            g_aUnaryU ## a_cBits[iFn].pfn(&Test.uDstOut, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, 0 }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void UnaryU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aUnaryU ## a_cBits[iFn].pszName); \
        a_TestType const * const paTests = g_aUnaryU ## a_cBits[iFn].paTests; \
        uint32_t const           cTests  = g_aUnaryU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type   uDst = paTests[iTest].uDstIn; \
            g_aUnaryU ## a_cBits[iFn].pfn(&uDst, &fEfl); \
            if (   uDst != paTests[iTest].uDstOut \
                || fEfl != paTests[iTest].fEflOut) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            else \
            { \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl         = paTests[iTest].fEflIn; \
                g_aUnaryU ## a_cBits[iFn].pfn(g_pu ## a_cBits, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
TEST_UNARY(8, uint8_t, "%#04RX8", BINU8_TEST_T)
TEST_UNARY(16, uint16_t, "%#06RX16", BINU16_TEST_T)
TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T)
TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T)

#ifdef TSTIEMAIMPL_WITH_GENERATOR
static void UnaryGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_UNARY_TESTS\n");
    UnaryU8Generate(pOut, cTests);
    UnaryU16Generate(pOut, cTests);
    UnaryU32Generate(pOut, cTests);
    UnaryU64Generate(pOut, cTests);
}
#endif

static void UnaryTest(void)
{
    UnaryU8Test();
    UnaryU16Test();
    UnaryU32Test();
    UnaryU64Test();
}


/*
 * Shifts.
 *
 * Note! We use BINUxx_TEST_T with the shift count in uMisc and uSrcIn unused.
 */
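/*
 * Note that the shift generator emits each test twice: once with the random
 * starting EFLAGS and once with all live flags inverted (the "b" variants),
 * which catches workers that merely preserve instead of recompute a flag and
 * also gives rcl/rcr both CF input polarities.
 */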
#define DUMMY_SHIFT_TESTS(a_cBits, a_Type, a_Vendor) \
    static const a_Type g_aTests_rol_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_ror_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_rcl_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_rcr_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_shl_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_shr_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_sar_u ## a_cBits ## a_Vendor[] = { {0} }
#ifndef HAVE_SHIFT_TESTS_AMD
DUMMY_SHIFT_TESTS(8, BINU8_TEST_T, _amd);
DUMMY_SHIFT_TESTS(16, BINU16_TEST_T, _amd);
DUMMY_SHIFT_TESTS(32, BINU32_TEST_T, _amd);
DUMMY_SHIFT_TESTS(64, BINU64_TEST_T, _amd);
#endif
#ifndef HAVE_SHIFT_TESTS_INTEL
DUMMY_SHIFT_TESTS(8, BINU8_TEST_T, _intel);
DUMMY_SHIFT_TESTS(16, BINU16_TEST_T, _intel);
DUMMY_SHIFT_TESTS(32, BINU32_TEST_T, _intel);
DUMMY_SHIFT_TESTS(64, BINU64_TEST_T, _intel);
#endif

#ifdef TSTIEMAIMPL_WITH_GENERATOR
# define GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
void ShiftU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn  = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn  = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn  = 0; \
            Test.uMisc   = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, %-2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uMisc, iTest); \
            \
            Test.fEflIn  = (~Test.fEflIn & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK; \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstOut = Test.uDstIn; \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, %-2u }, /* #%u b */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests)
#endif

#define TEST_SHIFT(a_cBits, a_Type, a_Fmt, a_TestType, a_aSubTests) \
static const struct \
{ \
    const char                   *pszName; \
    PFNIEMAIMPLSHIFTU ## a_cBits  pfn; \
    PFNIEMAIMPLSHIFTU ## a_cBits  pfnNative; \
    a_TestType const             *paTests; \
    uint32_t                      cTests, uExtra; \
    uint8_t                       idxCpuEflFlavour; \
} a_aSubTests[] = \
{ \
    ENTRY_AMD(  rol_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(rol_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD(  ror_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(ror_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD(  rcl_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(rcl_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD(  rcr_u ## a_cBits, X86_EFL_OF), \
    ENTRY_INTEL(rcr_u ## a_cBits, X86_EFL_OF), \
    ENTRY_AMD(  shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_INTEL(shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_AMD(  shr_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_INTEL(shr_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_AMD(  sar_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
    ENTRY_INTEL(sar_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
}; \
\
GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
\
static void ShiftU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        a_TestType const * const     paTests = a_aSubTests[iFn].paTests; \
        uint32_t const               cTests  = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLSHIFTU ## a_cBits pfn     = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type   uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut ) \
                    RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " shift=%2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                                 iTest, iVar == 0 ? "" : "/n", \
                                 paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uMisc, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
                else \
                { \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl         = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uMisc, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
TEST_SHIFT(8, uint8_t, "%#04RX8", BINU8_TEST_T, g_aShiftU8)
TEST_SHIFT(16, uint16_t, "%#06RX16", BINU16_TEST_T, g_aShiftU16)
TEST_SHIFT(32, uint32_t, "%#010RX32", BINU32_TEST_T, g_aShiftU32)
TEST_SHIFT(64, uint64_t, "%#018RX64", BINU64_TEST_T, g_aShiftU64)

#ifdef TSTIEMAIMPL_WITH_GENERATOR
static void ShiftGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_TESTS%s\n", pszCpuSuffU);
    ShiftU8Generate(pOut, cTests);
    ShiftU16Generate(pOut, cTests);
    ShiftU32Generate(pOut, cTests);
    ShiftU64Generate(pOut, cTests);
}
#endif

static void ShiftTest(void)
{
    ShiftU8Test();
    ShiftU16Test();
    ShiftU32Test();
    ShiftU64Test();
}


/*
 * Multiplication and division.
 *
 * Note! The 8-bit functions have a different format, so we need to duplicate things.
 * Note! Currently ignoring undefined bits.
 */
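/*
 * For the 8-bit forms the 16-bit AX register is both input and output, hence
 * MULDIVU8_TEST_T carries a single 16-bit destination, while the wider forms
 * use two destination registers (xDX:xAX).  The rc field records the
 * worker's return value, which signals whether the operation would raise a
 * division error (#DE).
 */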
1527
1528# define DUMMY_MULDIV_TESTS(a_cBits, a_Type, a_Vendor) \
1529 static const a_Type g_aTests_mul_u ## a_cBits ## a_Vendor[] = { {0} }; \
1530 static const a_Type g_aTests_imul_u ## a_cBits ## a_Vendor[] = { {0} }; \
1531 static const a_Type g_aTests_div_u ## a_cBits ## a_Vendor[] = { {0} }; \
1532 static const a_Type g_aTests_idiv_u ## a_cBits ## a_Vendor[] = { {0} }

#ifndef HAVE_MULDIV_TESTS_AMD
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _amd);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _amd);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _amd);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _amd);
#endif

#ifndef HAVE_MULDIV_TESTS_INTEL
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _intel);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _intel);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _intel);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _intel);
#endif

/* U8 */
static const struct
{
    const char             *pszName;
    PFNIEMAIMPLMULDIVU8     pfn;
    PFNIEMAIMPLMULDIVU8     pfnNative;
    MULDIVU8_TEST_T const  *paTests;
    uint32_t                cTests, uExtra;
    uint8_t                 idxCpuEflFlavour;
} g_aMulDivU8[] =
{
    ENTRY_AMD_EX(mul_u8,    X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                            X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(mul_u8,  X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(imul_u8,   X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                            X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(div_u8,    X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_INTEL_EX(div_u8,  X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_AMD_EX(idiv_u8,   X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_INTEL_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
};

#ifdef TSTIEMAIMPL_WITH_GENERATOR
static void MulDivU8Generate(PRTSTREAM pOut, uint32_t cTests)
{
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
    {
        if (   g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
            && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
            continue;
        RTStrmPrintf(pOut, "static const MULDIVU8_TEST_T g_aTests_%s[] =\n{\n", g_aMulDivU8[iFn].pszName);
        for (uint32_t iTest = 0; iTest < cTests; iTest++ )
        {
            MULDIVU8_TEST_T Test;
            Test.fEflIn  = RandEFlags();
            Test.fEflOut = Test.fEflIn;
            Test.uDstIn  = RandU16Dst(iTest);
            Test.uDstOut = Test.uDstIn;
            Test.uSrcIn  = RandU8Src(iTest);
            Test.rc      = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
            RTStrmPrintf(pOut, "    { %#08x, %#08x, %#06RX16, %#06RX16, %#04RX8, %d }, /* #%u */\n",
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.rc, iTest);
        }
        RTStrmPrintf(pOut, "};\n");
    }
}
#endif
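
/* A generated 8-bit entry comes out like this (illustrative values only; the
 * fields follow MULDIVU8_TEST_T: fEflIn, fEflOut, uDstIn, uDstOut, uSrcIn, rc):
 *     { 0x000246, 0x000212, 0x4de3, 0x8dd2, 0xd7, 0 },
 */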

static void MulDivU8Test(void)
{
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
    {
        RTTestSub(g_hTest, g_aMulDivU8[iFn].pszName);
        MULDIVU8_TEST_T const * const paTests = g_aMulDivU8[iFn].paTests;
        uint32_t const                cTests  = g_aMulDivU8[iFn].cTests;
        uint32_t const                fEflIgn = g_aMulDivU8[iFn].uExtra;
        PFNIEMAIMPLMULDIVU8           pfn     = g_aMulDivU8[iFn].pfn;
        uint32_t const cVars = 1 + (g_aMulDivU8[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && g_aMulDivU8[iFn].pfnNative);
        for (uint32_t iVar = 0; iVar < cVars; iVar++)
        {
            for (uint32_t iTest = 0; iTest < cTests; iTest++ )
            {
                uint32_t fEfl = paTests[iTest].fEflIn;
                uint16_t uDst = paTests[iTest].uDstIn;
                int      rc   = pfn(&uDst, paTests[iTest].uSrcIn, &fEfl);
                if (   uDst != paTests[iTest].uDstOut
                    || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)
                    || rc != paTests[iTest].rc)
                    RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst=%#06RX16 src=%#04RX8\n"
                                          " %s-> efl=%#08x dst=%#06RX16 rc=%d\n"
                                          "%sexpected %#08x %#06RX16 %d%s\n",
                                 iTest, iVar ? "/n" : "", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn,
                                 iVar ? " " : "", fEfl, uDst, rc,
                                 iVar ? " " : "", paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].rc,
                                 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn));
                else
                {
                    *g_pu16  = paTests[iTest].uDstIn;
                    *g_pfEfl = paTests[iTest].fEflIn;
                    rc = pfn(g_pu16, paTests[iTest].uSrcIn, g_pfEfl);
                    RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
                    RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
                    RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
                }
            }
            pfn = g_aMulDivU8[iFn].pfnNative;
        }
    }
}

#ifdef TSTIEMAIMPL_WITH_GENERATOR
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
void MulDivU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const MULDIVU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn   = RandEFlags(); \
            Test.fEflOut  = Test.fEflIn; \
            Test.uDst1In  = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst1Out = Test.uDst1In; \
            Test.uDst2In  = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst2Out = Test.uDst2In; \
            Test.uSrcIn   = RandU ## a_cBits ## Src(iTest); \
            Test.rc       = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", %d }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDst1In, Test.uDst1Out, Test.uDst2In, Test.uDst2Out, Test.uSrcIn, \
                         Test.rc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests)
#endif

#define TEST_MULDIV(a_cBits, a_Type, a_Fmt, a_TestType, a_aSubTests) \
static const struct \
{ \
    const char                     *pszName; \
    PFNIEMAIMPLMULDIVU ## a_cBits   pfn; \
    PFNIEMAIMPLMULDIVU ## a_cBits   pfnNative; \
    a_TestType const               *paTests; \
    uint32_t                        cTests, uExtra; \
    uint8_t                         idxCpuEflFlavour; \
} a_aSubTests[] = \
{ \
    ENTRY_AMD_EX(mul_u ## a_cBits,    X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_INTEL_EX(mul_u ## a_cBits,  X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_AMD_EX(imul_u ## a_cBits,   X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_INTEL_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
    ENTRY_AMD_EX(div_u ## a_cBits,    X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
    ENTRY_INTEL_EX(div_u ## a_cBits,  X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
    ENTRY_AMD_EX(idiv_u ## a_cBits,   X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
    ENTRY_INTEL_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
}; \
\
GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
\
static void MulDivU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const           cTests  = a_aSubTests[iFn].cTests; \
        uint32_t const           fEflIgn = a_aSubTests[iFn].uExtra; \
        PFNIEMAIMPLMULDIVU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        uint32_t const cVars = 1 + (a_aSubTests[iFn].idxCpuEflFlavour == g_idxCpuEflFlavour && a_aSubTests[iFn].pfnNative); \
        for (uint32_t iVar = 0; iVar < cVars; iVar++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl  = paTests[iTest].fEflIn; \
                a_Type   uDst1 = paTests[iTest].uDst1In; \
                a_Type   uDst2 = paTests[iTest].uDst2In; \
                int rc = pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
                if (   uDst1 != paTests[iTest].uDst1Out \
                    || uDst2 != paTests[iTest].uDst2Out \
                    || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn) \
                    || rc != paTests[iTest].rc) \
                    RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " src=" a_Fmt "\n" \
                                          " -> efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " rc=%d\n" \
                                          "expected %#08x " a_Fmt " " a_Fmt " %d%s -%s%s%s\n", \
                                 iTest, iVar == 0 ? "" : "/n", \
                                 paTests[iTest].fEflIn, paTests[iTest].uDst1In, paTests[iTest].uDst2In, paTests[iTest].uSrcIn, \
                                 fEfl, uDst1, uDst2, rc, \
                                 paTests[iTest].fEflOut, paTests[iTest].uDst1Out, paTests[iTest].uDst2Out, paTests[iTest].rc, \
                                 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn), \
                                 uDst1 != paTests[iTest].uDst1Out ? " dst1" : "", uDst2 != paTests[iTest].uDst2Out ? " dst2" : "", \
                                 (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn) ? " eflags" : ""); \
                else \
                { \
                    *g_pu ## a_cBits        = paTests[iTest].uDst1In; \
                    *g_pu ## a_cBits ## Two = paTests[iTest].uDst2In; \
                    *g_pfEfl                = paTests[iTest].fEflIn; \
                    rc = pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDst1Out); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits ## Two == paTests[iTest].uDst2Out); \
                    RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn)); \
                    RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
TEST_MULDIV(16, uint16_t, "%#06RX16",  MULDIVU16_TEST_T, g_aMulDivU16)
TEST_MULDIV(32, uint32_t, "%#010RX32", MULDIVU32_TEST_T, g_aMulDivU32)
TEST_MULDIV(64, uint64_t, "%#018RX64", MULDIVU64_TEST_T, g_aMulDivU64)

#ifdef TSTIEMAIMPL_WITH_GENERATOR
static void MulDivGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_MULDIV_TESTS%s\n", pszCpuSuffU);
    MulDivU8Generate(pOut, cTests);
    MulDivU16Generate(pOut, cTests);
    MulDivU32Generate(pOut, cTests);
    MulDivU64Generate(pOut, cTests);
}
#endif

static void MulDivTest(void)
{
    MulDivU8Test();
    MulDivU16Test();
    MulDivU32Test();
    MulDivU64Test();
}


/*
 * BSWAP
 */
static void BswapTest(void)
{
    RTTestSub(g_hTest, "bswap_u16");
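    /* Note: bswap of a 16-bit register is officially undefined, so there is
       no architecturally "right" answer here; the enabled checks below assume
       the IEM helper zeroes the low word, while the disabled #if 0 variants
       would expect a byte swap within the low word. */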
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12347856), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12340000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif
    *g_pu32 = UINT32_C(0xffff1122);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff2211), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff0000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif

    RTTestSub(g_hTest, "bswap_u32");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u32(g_pu32);
    RTTEST_CHECK(g_hTest, *g_pu32 == UINT32_C(0x78563412));

    RTTestSub(g_hTest, "bswap_u64");
    *g_pu64 = UINT64_C(0x0123456789abcdef);
    iemAImpl_bswap_u64(g_pu64);
    RTTEST_CHECK(g_hTest, *g_pu64 == UINT64_C(0xefcdab8967452301));
}


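/* Rough usage sketch, going by the argc checks below: with no arguments the
   testcase is skipped, one argument runs the tests, and two or more arguments
   regenerate the tstIEMAImplData*.h tables (generator builds only). */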
int main(int argc, char **argv)
{
    int rc = RTR3InitExe(argc, &argv, 0);
    if (RT_FAILURE(rc))
        return RTMsgInitFailure(rc);

    /*
     * Determine the host CPU.
     * If not using the IEMAllAImpl.asm code, this will be set to Intel.
     */
#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
    g_idxCpuEflFlavour = ASMIsAmdCpu() || ASMIsHygonCpu()
                       ? IEMTARGETCPU_EFL_BEHAVIOR_AMD
                       : IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
#else
    g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
#endif

    /*
     * Generate data?
     */
    if (argc > 2)
    {
#ifdef TSTIEMAIMPL_WITH_GENERATOR
        char szCpuDesc[256] = {0};
        RTMpGetDescription(NIL_RTCPUID, szCpuDesc, sizeof(szCpuDesc));
        const char * const pszCpuType  = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "Amd"  : "Intel";
        const char * const pszCpuSuff  = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_Amd" : "_Intel";
        const char * const pszCpuSuffU = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_AMD" : "_INTEL";

        PRTSTREAM pStrmData = NULL;
        rc = RTStrmOpen("tstIEMAImplData.h", "w", &pStrmData);
        if (!pStrmData)
            return RTMsgErrorExitFailure("Failed to open tstIEMAImplData.h for writing: %Rrc", rc);

        PRTSTREAM pStrmDataCpu = NULL;
        rc = RTStrmOpenF("w", &pStrmDataCpu, "tstIEMAImplData-%s.h", pszCpuType);
        if (!pStrmDataCpu)
            return RTMsgErrorExitFailure("Failed to open tstIEMAImplData-%s.h for writing: %Rrc", pszCpuType, rc);

        GenerateHeader(pStrmData, szCpuDesc, NULL, "");
        GenerateHeader(pStrmDataCpu, szCpuDesc, pszCpuType, pszCpuSuff);

        uint32_t cTests = 96;
        g_cZeroDstTests = RT_MIN(cTests / 16, 32);
        g_cZeroSrcTests = g_cZeroDstTests * 2;

        BinU8Generate( pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
        BinU16Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
        BinU32Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
        BinU64Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
        ShiftDblGenerate(pStrmDataCpu, pszCpuSuffU, RT_MAX(cTests, 128));
        UnaryGenerate(pStrmData, cTests);
        ShiftGenerate(pStrmDataCpu, pszCpuSuffU, cTests);
        MulDivGenerate(pStrmDataCpu, pszCpuSuffU, cTests);

        return GenerateFooterAndClose(pStrmDataCpu, pszCpuType, pszCpuSuff,
                                      GenerateFooterAndClose(pStrmData, NULL, "", RTEXITCODE_SUCCESS));
#else
        return RTMsgErrorExitFailure("Test data generator not compiled in!");
#endif
    }

    /*
     * Do testing. Currently disabled by default as data needs to be checked
     * on both Intel and AMD systems first.
     */
    rc = RTTestCreate("tstIEMAimpl", &g_hTest);
    AssertRCReturn(rc, RTEXITCODE_FAILURE);
    if (argc > 1)
    {
        /* Allocate guarded memory for use in the tests. */
#define ALLOC_GUARDED_VAR(a_puVar) do { \
        rc = RTTestGuardedAlloc(g_hTest, sizeof(*a_puVar), sizeof(*a_puVar), false /*fHead*/, (void **)&a_puVar); \
        if (RT_FAILURE(rc)) RTTestFailed(g_hTest, "Failed to allocate guarded mem: " #a_puVar); \
    } while (0)
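        /* Each variable lives next to a guard page (fHead=false, so presumably
           the guard sits after the allocation), making out-of-bounds accesses
           by the assembly helpers fault instead of silently corrupting memory. */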
        ALLOC_GUARDED_VAR(g_pu8);
        ALLOC_GUARDED_VAR(g_pu16);
        ALLOC_GUARDED_VAR(g_pu32);
        ALLOC_GUARDED_VAR(g_pu64);
        ALLOC_GUARDED_VAR(g_pu128);
        ALLOC_GUARDED_VAR(g_pu8Two);
        ALLOC_GUARDED_VAR(g_pu16Two);
        ALLOC_GUARDED_VAR(g_pu32Two);
        ALLOC_GUARDED_VAR(g_pu64Two);
        ALLOC_GUARDED_VAR(g_pu128Two);
        ALLOC_GUARDED_VAR(g_pfEfl);
        if (RTTestErrorCount(g_hTest) == 0)
        {
            BinU8Test();
            BinU16Test();
            BinU32Test();
            BinU64Test();
            XchgTest();
            XaddTest();
            CmpXchgTest();
            CmpXchg8bTest();
            CmpXchg16bTest();
            ShiftDblTest();
            UnaryTest();
            ShiftTest();
            MulDivTest();
            BswapTest();
        }
        return RTTestSummaryAndDestroy(g_hTest);
    }
    return RTTestSkipAndDestroy(g_hTest, "unfinished testcase");
}