VirtualBox

source: vbox/trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp@ 94155

Last change on this file since 94155 was 94155, checked in by vboxsync, 3 years ago

VMM/IEM: Try deal with basic Intel/AMD EFLAGS difference for binary and div/mul operations (intel side). bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 74.4 KB
Line 
1/* $Id: tstIEMAImpl.cpp 94155 2022-03-10 13:59:02Z vboxsync $ */
2/** @file
3 * IEM Assembly Instruction Helper Testcase.
4 */
5
6/*
7 * Copyright (C) 2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include "../include/IEMInternal.h"
23
24#include <iprt/errcore.h>
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/ctype.h>
28#include <iprt/initterm.h>
29#include <iprt/message.h>
30#include <iprt/mp.h>
31#include <iprt/rand.h>
32#include <iprt/stream.h>
33#include <iprt/string.h>
34#include <iprt/test.h>
35
36
37/*********************************************************************************************************************************
38* Structures and Typedefs *
39*********************************************************************************************************************************/
/** @name 8-bit binary (PFNIEMAIMPLBINU8)
 * @{ */
/** One test vector for an 8-bit binary operation. */
typedef struct BINU8_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint8_t  uDstIn;    /**< Destination operand on input. */
    uint8_t  uDstOut;   /**< Expected destination operand on output. */
    uint8_t  uSrcIn;    /**< Source operand (not modified by the worker). */
    uint8_t  uMisc;     /**< Extra per-test data; written as zero by the generator. */
} BINU8_TEST_T;

/** Descriptor for one 8-bit binary operation under test. */
typedef struct BINU8_T
{
    const char *pszName;            /**< The sub-test name. */
    PFNIEMAIMPLBINU8 pfn;           /**< The worker to test. */
    PFNIEMAIMPLBINU8 pfnNative;     /**< Native worker used by the generator; NULL if pfn is it. */
    BINU8_TEST_T const *paTests;    /**< The test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Non-zero for bit-test style ops: generator masks the source to a valid bit index. */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU8_T;
/** @} */
63
64
/** @name 16-bit binary (PFNIEMAIMPLBINU16)
 * @{ */
/** One test vector for a 16-bit binary operation (input/expected EFLAGS and operands). */
typedef struct BINU16_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint16_t uDstIn;    /**< Destination operand on input. */
    uint16_t uDstOut;   /**< Expected destination operand on output. */
    uint16_t uSrcIn;    /**< Source operand. */
    uint16_t uMisc;     /**< Extra per-test data; written as zero by the generator. */
} BINU16_TEST_T;

/** Descriptor for one 16-bit binary operation under test. */
typedef struct BINU16_T
{
    const char *pszName;            /**< The sub-test name. */
    PFNIEMAIMPLBINU16 pfn;          /**< The worker to test. */
    PFNIEMAIMPLBINU16 pfnNative;    /**< Native worker used by the generator; NULL if pfn is it. */
    BINU16_TEST_T const *paTests;   /**< The test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Non-zero for bit-test style ops (source masked to a valid bit index). */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU16_T;
/** @} */
88
89
/** @name 32-bit binary (PFNIEMAIMPLBINU32)
 * @{ */
/** One test vector for a 32-bit binary operation (input/expected EFLAGS and operands). */
typedef struct BINU32_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint32_t uDstIn;    /**< Destination operand on input. */
    uint32_t uDstOut;   /**< Expected destination operand on output. */
    uint32_t uSrcIn;    /**< Source operand. */
    uint32_t uMisc;     /**< Extra per-test data; written as zero by the generator. */
} BINU32_TEST_T;

/** Descriptor for one 32-bit binary operation under test. */
typedef struct BINU32_T
{
    const char *pszName;            /**< The sub-test name. */
    PFNIEMAIMPLBINU32 pfn;          /**< The worker to test. */
    PFNIEMAIMPLBINU32 pfnNative;    /**< Native worker used by the generator; NULL if pfn is it. */
    BINU32_TEST_T const *paTests;   /**< The test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Non-zero for bit-test style ops (source masked to a valid bit index). */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU32_T;
/** @} */
113
114
/** @name 64-bit binary (PFNIEMAIMPLBINU64)
 * @{ */
/** One test vector for a 64-bit binary operation (input/expected EFLAGS and operands). */
typedef struct BINU64_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint64_t uDstIn;    /**< Destination operand on input. */
    uint64_t uDstOut;   /**< Expected destination operand on output. */
    uint64_t uSrcIn;    /**< Source operand. */
    uint64_t uMisc;     /**< Extra per-test data; written as zero by the generator. */
} BINU64_TEST_T;

/** Descriptor for one 64-bit binary operation under test. */
typedef struct BINU64_T
{
    const char *pszName;            /**< The sub-test name. */
    PFNIEMAIMPLBINU64 pfn;          /**< The worker to test. */
    PFNIEMAIMPLBINU64 pfnNative;    /**< Native worker used by the generator; NULL if pfn is it. */
    BINU64_TEST_T const *paTests;   /**< The test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Non-zero for bit-test style ops (source masked to a valid bit index). */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU64_T;
/** @} */
138
139
/** @name mult/div (PFNIEMAIMPLBINU8, PFNIEMAIMPLBINU16, PFNIEMAIMPLBINU32, PFNIEMAIMPLBINU64)
 * @{ */
/** Test vector for 8-bit mul/div; note the destination is 16 bits wide. */
typedef struct MULDIVU8_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint16_t uDstIn;    /**< 16-bit destination on input (AX-style, twice the operand width). */
    uint16_t uDstOut;   /**< Expected 16-bit destination on output. */
    uint8_t  uSrcIn;    /**< 8-bit source operand. */
    int32_t  rc;        /**< Expected worker return code. */
} MULDIVU8_TEST_T;

/** Test vector for 16-bit mul/div with a split (high:low) destination pair. */
typedef struct MULDIVU16_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint16_t uDst1In;   /**< First destination operand on input. */
    uint16_t uDst1Out;  /**< Expected first destination on output. */
    uint16_t uDst2In;   /**< Second destination operand on input. */
    uint16_t uDst2Out;  /**< Expected second destination on output. */
    uint16_t uSrcIn;    /**< Source operand. */
    int32_t  rc;        /**< Expected worker return code. */
} MULDIVU16_TEST_T;

/** Test vector for 32-bit mul/div with a split (high:low) destination pair. */
typedef struct MULDIVU32_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint32_t uDst1In;   /**< First destination operand on input. */
    uint32_t uDst1Out;  /**< Expected first destination on output. */
    uint32_t uDst2In;   /**< Second destination operand on input. */
    uint32_t uDst2Out;  /**< Expected second destination on output. */
    uint32_t uSrcIn;    /**< Source operand. */
    int32_t  rc;        /**< Expected worker return code. */
} MULDIVU32_TEST_T;

/** Test vector for 64-bit mul/div with a split (high:low) destination pair. */
typedef struct MULDIVU64_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS fed to the worker. */
    uint32_t fEflOut;   /**< Expected EFLAGS after the operation. */
    uint64_t uDst1In;   /**< First destination operand on input. */
    uint64_t uDst1Out;  /**< Expected first destination on output. */
    uint64_t uDst2In;   /**< Second destination operand on input. */
    uint64_t uDst2Out;  /**< Expected second destination on output. */
    uint64_t uSrcIn;    /**< Source operand. */
    int32_t  rc;        /**< Expected worker return code. */
} MULDIVU64_TEST_T;
/** @} */
188
189
190/*********************************************************************************************************************************
191* Defined Constants And Macros *
192*********************************************************************************************************************************/
/** Table entry for a worker whose EFLAGS behaviour is vendor independent. */
#define ENTRY(a_Name) ENTRY_EX(a_Name, 0)
/** Same as ENTRY, but also supplying a uExtra value. */
#define ENTRY_EX(a_Name, a_uExtra) \
    { #a_Name, iemAImpl_ ## a_Name, NULL, \
      g_aTests_ ## a_Name, RT_ELEMENTS(g_aTests_ ## a_Name), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }

/** Table entry for the Intel flavour of a worker.
 * @note a_fEflUndef (the undefined EFLAGS) is not currently stored in the table. */
#define ENTRY_INTEL(a_Name, a_fEflUndef) ENTRY_INTEL_EX(a_Name, a_fEflUndef, 0)
/** Same as ENTRY_INTEL, but also supplying a uExtra value. */
#define ENTRY_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) \
    { #a_Name "_intel", iemAImpl_ ## a_Name ## _intel, iemAImpl_ ## a_Name, \
      g_aTests_ ## a_Name ## _intel, RT_ELEMENTS(g_aTests_ ## a_Name ## _intel), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_INTEL }

/** Table entry for the AMD flavour of a worker.
 * @note a_fEflUndef (the undefined EFLAGS) is not currently stored in the table. */
#define ENTRY_AMD(a_Name, a_fEflUndef) ENTRY_AMD_EX(a_Name, a_fEflUndef, 0)
/** Same as ENTRY_AMD, but also supplying a uExtra value. */
#define ENTRY_AMD_EX(a_Name, a_fEflUndef, a_uExtra) \
    { #a_Name "_amd", iemAImpl_ ## a_Name ## _amd, iemAImpl_ ## a_Name, \
      g_aTests_ ## a_Name ## _amd, RT_ELEMENTS(g_aTests_ ## a_Name ## _amd), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_AMD }
210
211
212/*********************************************************************************************************************************
213* Global Variables *
214*********************************************************************************************************************************/
static RTTEST g_hTest;                                          /**< The test instance handle. */
/** Which CPU vendor EFLAGS flavour to test (IEMTARGETCPU_EFL_BEHAVIOR_XXX). */
static uint8_t g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
#ifdef TSTIEMAIMPL_WITH_GENERATOR
static uint32_t g_cZeroDstTests = 2;    /**< Number of leading generated tests that use a zero destination. */
static uint32_t g_cZeroSrcTests = 4;    /**< Number of leading generated tests that use a zero source. */
#endif
/** @name Scratch memory operands for exercising the workers.
 * @{ */
static uint8_t *g_pu8, *g_pu8Two;
static uint16_t *g_pu16, *g_pu16Two;
static uint32_t *g_pu32, *g_pu32Two, *g_pfEfl;
static uint64_t *g_pu64, *g_pu64Two;
static RTUINT128U *g_pu128, *g_pu128Two;
/** @} */
226
227
228#include "tstIEMAImplData.h"
229#include "tstIEMAImplData-Intel.h"
230#include "tstIEMAImplData-Amd.h"
231
232
233/*
234 * Random helpers.
235 */
236
237static uint32_t RandEFlags(void)
238{
239 uint32_t fEfl = RTRandU32();
240 return (fEfl & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK;
241}
242
243
244static uint8_t RandU8(void)
245{
246 return RTRandU32Ex(0, 0xff);
247}
248
249
250static uint16_t RandU16(void)
251{
252 return RTRandU32Ex(0, 0xffff);
253}
254
255
/** Generates a random 32-bit unsigned value (thin wrapper over IPRT). */
static uint32_t RandU32(void)
{
    return RTRandU32();
}
260
261
/** Generates a random 64-bit unsigned value (thin wrapper over IPRT). */
static uint64_t RandU64(void)
{
    return RTRandU64();
}
266
267
268static RTUINT128U RandU128(void)
269{
270 RTUINT128U Ret;
271 Ret.s.Hi = RTRandU64();
272 Ret.s.Lo = RTRandU64();
273 return Ret;
274}
275
276#ifdef TSTIEMAIMPL_WITH_GENERATOR
277
278static uint8_t RandU8Dst(uint32_t iTest)
279{
280 if (iTest < g_cZeroDstTests)
281 return 0;
282 return RandU8();
283}
284
285
286static uint8_t RandU8Src(uint32_t iTest)
287{
288 if (iTest < g_cZeroSrcTests)
289 return 0;
290 return RandU8();
291}
292
293
294static uint16_t RandU16Dst(uint32_t iTest)
295{
296 if (iTest < g_cZeroDstTests)
297 return 0;
298 return RandU16();
299}
300
301
302static uint16_t RandU16Src(uint32_t iTest)
303{
304 if (iTest < g_cZeroSrcTests)
305 return 0;
306 return RandU16();
307}
308
309
310static uint32_t RandU32Dst(uint32_t iTest)
311{
312 if (iTest < g_cZeroDstTests)
313 return 0;
314 return RandU32();
315}
316
317
318static uint32_t RandU32Src(uint32_t iTest)
319{
320 if (iTest < g_cZeroSrcTests)
321 return 0;
322 return RandU32();
323}
324
325
326static uint64_t RandU64Dst(uint32_t iTest)
327{
328 if (iTest < g_cZeroDstTests)
329 return 0;
330 return RandU64();
331}
332
333
334static uint64_t RandU64Src(uint32_t iTest)
335{
336 if (iTest < g_cZeroSrcTests)
337 return 0;
338 return RandU64();
339}
340
341
/**
 * Writes the file comment and header guard opening of a generated test data
 * header to @a pOut.
 *
 * @param   pOut        The output stream.
 * @param   pszCpuDesc  Description of the CPU the data was generated on (goes
 *                      into the file comment).
 * @param   pszCpuType  CPU vendor name for the file comment, or NULL for the
 *                      vendor independent data file.
 * @param   pszCpuSuffU Suffix appended to the header guard name (presumably
 *                      "-Intel"/"-Amd" style -- confirm against callers; empty
 *                      string for the common file).
 */
static void GenerateHeader(PRTSTREAM pOut, const char *pszCpuDesc, const char *pszCpuType, const char *pszCpuSuffU)
{
    /* We want to tag the generated source code with the revision that produced it. */
    static char s_szRev[] = "$Revision: 94155 $";
    const char *pszRev = RTStrStripL(strchr(s_szRev, ':') + 1);
    size_t cchRev = 0;
    while (RT_C_IS_DIGIT(pszRev[cchRev]))
        cchRev++;

    RTStrmPrintf(pOut,
                 "/* $Id: tstIEMAImpl.cpp 94155 2022-03-10 13:59:02Z vboxsync $ */\n"
                 "/** @file\n"
                 " * IEM Assembly Instruction Helper Testcase Data%s%s - r%.*s on %s.\n"
                 " */\n"
                 "\n"
                 "/*\n"
                 " * Copyright (C) 2022 Oracle Corporation\n"
                 " *\n"
                 " * This file is part of VirtualBox Open Source Edition (OSE), as\n"
                 " * available from http://www.virtualbox.org. This file is free software;\n"
                 " * you can redistribute it and/or modify it under the terms of the GNU\n"
                 " * General Public License (GPL) as published by the Free Software\n"
                 " * Foundation, in version 2 as it comes in the \"COPYING\" file of the\n"
                 " * VirtualBox OSE distribution. VirtualBox OSE is distributed in the\n"
                 " * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.\n"
                 " */\n"
                 "\n"
                 "#ifndef VMM_INCLUDE_SRC_testcase_tstIEMAImplData%s_h\n"
                 "#define VMM_INCLUDE_SRC_testcase_tstIEMAImplData%s_h\n"
                 "#ifndef RT_WITHOUT_PRAGMA_ONCE\n"
                 "# pragma once\n"
                 "#endif\n"
                 ,
                 pszCpuType ? " " : "", pszCpuType ? pszCpuType : "", cchRev, pszRev, pszCpuDesc,
                 pszCpuSuffU,
                 pszCpuSuffU);
}
379
380
/**
 * Writes the header guard terminator to @a pOut and closes the stream.
 *
 * @returns @a rcExit on success, failure exit code if closing the stream fails.
 * @param   pOut        The output stream (closed in all cases).
 * @param   pszCpuType  CPU vendor name, NULL for the common file (only used in
 *                      the error message).
 * @param   pszCpuSuff  Header guard suffix, must match what GenerateHeader used.
 * @param   rcExit      The exit code to propagate on success.
 */
static RTEXITCODE GenerateFooterAndClose(PRTSTREAM pOut, const char *pszCpuType, const char *pszCpuSuff, RTEXITCODE rcExit)
{
    RTStrmPrintf(pOut,
                 "\n"
                 "#endif /* !VMM_INCLUDE_SRC_testcase_tstIEMAImplData%s_h */\n", pszCpuSuff);
    int rc = RTStrmClose(pOut);
    if (RT_SUCCESS(rc))
        return rcExit;
    return RTMsgErrorExitFailure("RTStrmClose failed on tstIEMAImplData%s%s.h: %Rrc",
                                 pszCpuType ? "-" : "", pszCpuType ? pszCpuType : "", rc);
}
392
393#endif
394
395
396/*
397 * Test helpers.
398 */
399static const char *EFlagsDiff(uint32_t fActual, uint32_t fExpected)
400{
401 if (fActual == fExpected)
402 return "";
403
404 uint32_t const fXor = fActual ^ fExpected;
405 static char s_szBuf[256];
406 size_t cch = RTStrPrintf(s_szBuf, sizeof(s_szBuf), " - %#x", fXor);
407
408 static struct
409 {
410 const char *pszName;
411 uint32_t fFlag;
412 } const s_aFlags[] =
413 {
414#define EFL_ENTRY(a_Flags) { #a_Flags, X86_EFL_ ## a_Flags }
415 EFL_ENTRY(CF),
416 EFL_ENTRY(PF),
417 EFL_ENTRY(AF),
418 EFL_ENTRY(ZF),
419 EFL_ENTRY(SF),
420 EFL_ENTRY(TF),
421 EFL_ENTRY(IF),
422 EFL_ENTRY(DF),
423 EFL_ENTRY(OF),
424 EFL_ENTRY(IOPL),
425 EFL_ENTRY(NT),
426 EFL_ENTRY(RF),
427 EFL_ENTRY(VM),
428 EFL_ENTRY(AC),
429 EFL_ENTRY(VIF),
430 EFL_ENTRY(VIP),
431 EFL_ENTRY(ID),
432 };
433 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
434 if (s_aFlags[i].fFlag & fXor)
435 cch += RTStrPrintf(&s_szBuf[cch], sizeof(s_szBuf) - cch,
436 s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
437 RTStrPrintf(&s_szBuf[cch], sizeof(s_szBuf) - cch, "");
438 return s_szBuf;
439}
440
441
442/*
443 * Binary operations.
444 */
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Emits a BinU<a_cBits>Generate() function that writes test vector tables by
 * feeding random inputs to each worker (preferring the native one) and
 * recording its outputs.  Vendor-flavoured workers matching the host flavour
 * go to the CPU specific stream (pOutCpu); everything else to pOut.
 */
# define GEN_BINARY_TESTS(a_cBits, a_Fmt) \
static void BinU ## a_cBits ## Generate(PRTSTREAM pOut, PRTSTREAM pOutCpu, const char *pszCpuSuffU, uint32_t cTests) \
{ \
    RTStrmPrintf(pOut, "\n\n#define HAVE_BINU%u_TESTS\n", a_cBits); \
    RTStrmPrintf(pOutCpu, "\n\n#define HAVE_BINU%u_TESTS%s\n", a_cBits, pszCpuSuffU); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aBinU ## a_cBits); iFn++) \
    { \
        PFNIEMAIMPLBINU ## a_cBits const pfn = g_aBinU ## a_cBits[iFn].pfnNative \
                                             ? g_aBinU ## a_cBits[iFn].pfnNative : g_aBinU ## a_cBits[iFn].pfn; \
        PRTSTREAM pOutFn = pOut; \
        if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE) \
        { \
            if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
                continue; \
            pOutFn = pOutCpu; \
        } \
        \
        RTStrmPrintf(pOutFn, "static const BINU%u_TEST_T g_aTests_%s[] =\n{\n", a_cBits, g_aBinU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn  = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn  = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn  = RandU ## a_cBits ## Src(iTest); \
            if (g_aBinU ## a_cBits[iFn].uExtra) \
                Test.uSrcIn &= a_cBits - 1; /* Restrict bit index according to operand width */ \
            Test.uMisc   = 0; \
            pfn(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOutFn, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %#x }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOutFn, "};\n"); \
    } \
}
#else
/* No generator build: expand to nothing. */
# define GEN_BINARY_TESTS(a_cBits, a_Fmt)
#endif
485
/**
 * Emits the generator (if enabled) and the BinU<a_cBits>Test() function, which
 * replays every stored test vector against each worker in @a a_aSubTests,
 * checking the resulting destination and EFLAGS.  Each passing vector is then
 * re-run against the heap-based scratch operands (g_puXX/g_pfEfl); after the
 * first pass the native worker (pfnNative), when present, gets a second pass.
 */
#define TEST_BINARY_OPS(a_cBits, a_uType, a_Fmt, a_aSubTests) \
GEN_BINARY_TESTS(a_cBits, a_Fmt) \
\
static void BinU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLBINU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        for (uint32_t iCpu = 0; iCpu < 2 && pfn; iCpu++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_uType uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uSrcIn, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut) \
                    RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s - %s\n", \
                                 iTest, !iCpu ? "" : "/n", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut), \
                                 uDst == paTests[iTest].uDstOut ? "eflags" : fEfl == paTests[iTest].fEflOut ? "dst" : "both"); \
                else \
                { \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
528
529
530/*
531 * 8-bit binary operations.
532 */
533
/* Placeholder tables used when no generated test data is available
   (a single all-zero vector so the tables are never empty). */
#ifndef HAVE_BINU8_TESTS
static const BINU8_TEST_T g_aTests_add_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_add_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_adc_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_adc_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_sub_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_sub_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_sbb_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_sbb_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_or_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_or_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_xor_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_xor_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_and_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_and_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_cmp_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_test_u8[] = { {0} };
#endif

/** The 8-bit binary operation workers and their test data. */
static const BINU8_T g_aBinU8[] =
{
    ENTRY(add_u8),
    ENTRY(add_u8_locked),
    ENTRY(adc_u8),
    ENTRY(adc_u8_locked),
    ENTRY(sub_u8),
    ENTRY(sub_u8_locked),
    ENTRY(sbb_u8),
    ENTRY(sbb_u8_locked),
    ENTRY(or_u8),
    ENTRY(or_u8_locked),
    ENTRY(xor_u8),
    ENTRY(xor_u8_locked),
    ENTRY(and_u8),
    ENTRY(and_u8_locked),
    ENTRY(cmp_u8),
    ENTRY(test_u8),
};

/* Instantiates BinU8Generate() (generator builds only) and BinU8Test(). */
TEST_BINARY_OPS(8, uint8_t, "%#04x", g_aBinU8)
574
575
576/*
577 * 16-bit binary operations.
578 */
579
/* Placeholder tables used when no generated test data is available. */
#ifndef HAVE_BINU16_TESTS
static const BINU16_TEST_T g_aTests_add_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_add_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_cmp_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_test_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bt_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_arpl[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_AMD
static const BINU16_TEST_T g_aTests_bsf_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_amd[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_INTEL
static const BINU16_TEST_T g_aTests_bsf_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_intel[] = { {0} };
#endif

/** The 16-bit binary operation workers and their test data.  The bt* entries
 *  pass uExtra=1 so the generator restricts the source to a valid bit index;
 *  bsf/bsr/imul_two have vendor specific EFLAGS behaviour (second macro
 *  argument lists the architecturally undefined flags). */
static const BINU16_T g_aBinU16[] =
{
    ENTRY(add_u16),
    ENTRY(add_u16_locked),
    ENTRY(adc_u16),
    ENTRY(adc_u16_locked),
    ENTRY(sub_u16),
    ENTRY(sub_u16_locked),
    ENTRY(sbb_u16),
    ENTRY(sbb_u16_locked),
    ENTRY(or_u16),
    ENTRY(or_u16_locked),
    ENTRY(xor_u16),
    ENTRY(xor_u16_locked),
    ENTRY(and_u16),
    ENTRY(and_u16_locked),
    ENTRY(cmp_u16),
    ENTRY(test_u16),
    ENTRY_EX(bt_u16, 1),
    ENTRY_EX(btc_u16, 1),
    ENTRY_EX(btc_u16_locked, 1),
    ENTRY_EX(btr_u16, 1),
    ENTRY_EX(btr_u16_locked, 1),
    ENTRY_EX(bts_u16, 1),
    ENTRY_EX(bts_u16_locked, 1),
    ENTRY_AMD(  bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY(arpl),
};

/* Instantiates BinU16Generate() (generator builds only) and BinU16Test(). */
TEST_BINARY_OPS(16, uint16_t, "%#06x", g_aBinU16)
652
653
654/*
655 * 32-bit binary operations.
656 */
657
/* Placeholder tables used when no generated test data is available. */
#ifndef HAVE_BINU32_TESTS
static const BINU32_TEST_T g_aTests_add_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_add_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_cmp_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_test_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bt_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32_locked[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_AMD
static const BINU32_TEST_T g_aTests_bsf_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_amd[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_INTEL
static const BINU32_TEST_T g_aTests_bsf_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_intel[] = { {0} };
#endif

/** The 32-bit binary operation workers and their test data.  uExtra=1 on the
 *  bt* entries restricts the generated source to a valid bit index; bsf/bsr
 *  and imul_two have vendor specific flags behaviour. */
static const BINU32_T g_aBinU32[] =
{
    ENTRY(add_u32),
    ENTRY(add_u32_locked),
    ENTRY(adc_u32),
    ENTRY(adc_u32_locked),
    ENTRY(sub_u32),
    ENTRY(sub_u32_locked),
    ENTRY(sbb_u32),
    ENTRY(sbb_u32_locked),
    ENTRY(or_u32),
    ENTRY(or_u32_locked),
    ENTRY(xor_u32),
    ENTRY(xor_u32_locked),
    ENTRY(and_u32),
    ENTRY(and_u32_locked),
    ENTRY(cmp_u32),
    ENTRY(test_u32),
    ENTRY_EX(bt_u32, 1),
    ENTRY_EX(btc_u32, 1),
    ENTRY_EX(btc_u32_locked, 1),
    ENTRY_EX(btr_u32, 1),
    ENTRY_EX(btr_u32_locked, 1),
    ENTRY_EX(bts_u32, 1),
    ENTRY_EX(bts_u32_locked, 1),
    ENTRY_AMD(  bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

/* Instantiates BinU32Generate() (generator builds only) and BinU32Test(). */
TEST_BINARY_OPS(32, uint32_t, "%#010RX32", g_aBinU32)
728
729
730/*
731 * 64-bit binary operations.
732 */
733
/* Placeholder tables used when no generated test data is available. */
#ifndef HAVE_BINU64_TESTS
static const BINU64_TEST_T g_aTests_add_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_add_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_cmp_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_test_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bt_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64_locked[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_AMD
static const BINU64_TEST_T g_aTests_bsf_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_amd[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_INTEL
static const BINU64_TEST_T g_aTests_bsf_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_intel[] = { {0} };
#endif

/** The 64-bit binary operation workers and their test data.  uExtra=1 on the
 *  bt* entries restricts the generated source to a valid bit index; bsf/bsr
 *  and imul_two have vendor specific flags behaviour. */
static const BINU64_T g_aBinU64[] =
{
    ENTRY(add_u64),
    ENTRY(add_u64_locked),
    ENTRY(adc_u64),
    ENTRY(adc_u64_locked),
    ENTRY(sub_u64),
    ENTRY(sub_u64_locked),
    ENTRY(sbb_u64),
    ENTRY(sbb_u64_locked),
    ENTRY(or_u64),
    ENTRY(or_u64_locked),
    ENTRY(xor_u64),
    ENTRY(xor_u64_locked),
    ENTRY(and_u64),
    ENTRY(and_u64_locked),
    ENTRY(cmp_u64),
    ENTRY(test_u64),
    ENTRY_EX(bt_u64, 1),
    ENTRY_EX(btc_u64, 1),
    ENTRY_EX(btc_u64_locked, 1),
    ENTRY_EX(btr_u64, 1),
    ENTRY_EX(btr_u64_locked, 1),
    ENTRY_EX(bts_u64, 1),
    ENTRY_EX(bts_u64_locked, 1),
    ENTRY_AMD(  bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

/* Instantiates BinU64Generate() (generator builds only) and BinU64Test(). */
TEST_BINARY_OPS(64, uint64_t, "%#018RX64", g_aBinU64)
804
805
806/*
807 * XCHG
808 */
809static void XchgTest(void)
810{
811 RTTestSub(g_hTest, "xchg");
812 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
813 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
814 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
815 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU64,(uint64_t *pu64Mem, uint64_t *pu64Reg));
816
817 static struct
818 {
819 uint8_t cb; uint64_t fMask;
820 union
821 {
822 uintptr_t pfn;
823 FNIEMAIMPLXCHGU8 *pfnU8;
824 FNIEMAIMPLXCHGU16 *pfnU16;
825 FNIEMAIMPLXCHGU32 *pfnU32;
826 FNIEMAIMPLXCHGU64 *pfnU64;
827 } u;
828 }
829 s_aXchgWorkers[] =
830 {
831 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_locked } },
832 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_locked } },
833 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_locked } },
834 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_locked } },
835 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_unlocked } },
836 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_unlocked } },
837 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_unlocked } },
838 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_unlocked } },
839 };
840 for (size_t i = 0; i < RT_ELEMENTS(s_aXchgWorkers); i++)
841 {
842 RTUINT64U uIn1, uIn2, uMem, uDst;
843 uMem.u = uIn1.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
844 uDst.u = uIn2.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
845 if (uIn1.u == uIn2.u)
846 uDst.u = uIn2.u = ~uIn2.u;
847
848 switch (s_aXchgWorkers[i].cb)
849 {
850 case 1:
851 s_aXchgWorkers[i].u.pfnU8(g_pu8, g_pu8Two);
852 s_aXchgWorkers[i].u.pfnU8(&uMem.au8[0], &uDst.au8[0]);
853 break;
854 case 2:
855 s_aXchgWorkers[i].u.pfnU16(g_pu16, g_pu16Two);
856 s_aXchgWorkers[i].u.pfnU16(&uMem.Words.w0, &uDst.Words.w0);
857 break;
858 case 4:
859 s_aXchgWorkers[i].u.pfnU32(g_pu32, g_pu32Two);
860 s_aXchgWorkers[i].u.pfnU32(&uMem.DWords.dw0, &uDst.DWords.dw0);
861 break;
862 case 8:
863 s_aXchgWorkers[i].u.pfnU64(g_pu64, g_pu64Two);
864 s_aXchgWorkers[i].u.pfnU64(&uMem.u, &uDst.u);
865 break;
866 default: RTTestFailed(g_hTest, "%d\n", s_aXchgWorkers[i].cb); break;
867 }
868
869 if (uMem.u != uIn2.u || uDst.u != uIn1.u)
870 RTTestFailed(g_hTest, "i=%u: %#RX64, %#RX64 -> %#RX64, %#RX64\n", i, uIn1.u, uIn2.u, uMem.u, uDst.u);
871 }
872}
873
874
875/*
876 * XADD
877 */
878static void XaddTest(void)
879{
880#define TEST_XADD(a_cBits, a_Type, a_Fmt) do { \
881 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXADDU ## a_cBits, (a_Type *, a_Type *, uint32_t *)); \
882 static struct \
883 { \
884 const char *pszName; \
885 FNIEMAIMPLXADDU ## a_cBits *pfn; \
886 BINU ## a_cBits ## _TEST_T const *paTests; \
887 uint32_t cTests; \
888 } const s_aFuncs[] = \
889 { \
890 { "xadd_u" # a_cBits, iemAImpl_xadd_u ## a_cBits, \
891 g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
892 { "xadd_u" # a_cBits "8_locked", iemAImpl_xadd_u ## a_cBits ## _locked, \
893 g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
894 }; \
895 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
896 { \
897 RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
898 BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
899 uint32_t const cTests = s_aFuncs[iFn].cTests; \
900 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
901 { \
902 uint32_t fEfl = paTests[iTest].fEflIn; \
903 a_Type uSrc = paTests[iTest].uSrcIn; \
904 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
905 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uSrc, &fEfl); \
906 if ( fEfl != paTests[iTest].fEflOut \
907 || *g_pu ## a_cBits != paTests[iTest].uDstOut \
908 || uSrc != paTests[iTest].uDstIn) \
909 RTTestFailed(g_hTest, "%s/#%u: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt " src=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
910 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
911 fEfl, *g_pu ## a_cBits, uSrc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].uDstIn, \
912 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
913 } \
914 } \
915 } while(0)
916 TEST_XADD(8, uint8_t, "%#04x");
917 TEST_XADD(16, uint16_t, "%#06x");
918 TEST_XADD(32, uint32_t, "%#010RX32");
919 TEST_XADD(64, uint64_t, "%#010RX64");
920}
921
922
923/*
924 * CMPXCHG
925 */
926
/**
 * Tests the cmpxchg implementations (plain and locked) for 8/16/32-bit
 * operands (and 64-bit where the host calling convention allows it).
 *
 * Reuses the binary 'cmp' test data: each entry is run once with a comparand
 * that almost certainly mismatches (memory must be left as-is and the
 * accumulator loaded with the memory value), and once with a matching
 * comparand (the new value must be stored and EFLAGS must equal what the
 * corresponding 'sub' implementation produces for dst - dst).
 */
static void CmpXchgTest(void)
{
#define TEST_CMPXCHG(a_cBits, a_Type, a_Fmt) do {\
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHGU ## a_cBits, (a_Type *, a_Type *, a_Type, uint32_t *)); \
    static struct \
    { \
        const char *pszName; \
        FNIEMAIMPLCMPXCHGU ## a_cBits *pfn; \
        PFNIEMAIMPLBINU ## a_cBits pfnSub; \
        BINU ## a_cBits ## _TEST_T const *paTests; \
        uint32_t cTests; \
    } const s_aFuncs[] = \
    { \
        { "cmpxchg_u" # a_cBits, iemAImpl_cmpxchg_u ## a_cBits, iemAImpl_sub_u ## a_cBits, \
          g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
        { "cmpxchg_u" # a_cBits "_locked", iemAImpl_cmpxchg_u ## a_cBits ## _locked, iemAImpl_sub_u ## a_cBits, \
          g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
    }; \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
    { \
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
        uint32_t const cTests = s_aFuncs[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++) \
        { \
            /* as is (99% likely to be negative). */ \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type const uNew = paTests[iTest].uSrcIn + 0x42; \
            a_Type uA = paTests[iTest].uDstIn; \
            *g_pu ## a_cBits = paTests[iTest].uSrcIn; \
            a_Type const uExpect = uA != paTests[iTest].uSrcIn ? paTests[iTest].uSrcIn : uNew; \
            s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
            if ( fEfl != paTests[iTest].fEflOut \
                || *g_pu ## a_cBits != uExpect \
                || uA != paTests[iTest].uSrcIn) \
                RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                             s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uSrcIn, paTests[iTest].uDstIn, \
                             uNew, fEfl, *g_pu ## a_cBits, uA, paTests[iTest].fEflOut, uExpect, paTests[iTest].uSrcIn, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            /* positive */ \
            uint32_t fEflExpect = paTests[iTest].fEflIn; \
            uA = paTests[iTest].uDstIn; \
            s_aFuncs[iFn].pfnSub(&uA, uA, &fEflExpect); \
            fEfl = paTests[iTest].fEflIn; \
            uA = paTests[iTest].uDstIn; \
            *g_pu ## a_cBits = uA; \
            s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
            if ( fEfl != fEflExpect \
                || *g_pu ## a_cBits != uNew \
                || uA != paTests[iTest].uDstIn) \
                RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                             s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uDstIn, \
                             uNew, fEfl, *g_pu ## a_cBits, uA, fEflExpect, uNew, paTests[iTest].uDstIn, \
                             EFlagsDiff(fEfl, fEflExpect)); \
        } \
    } \
    } while(0)
    TEST_CMPXCHG(8, uint8_t, "%#04RX8");
    TEST_CMPXCHG(16, uint16_t, "%#06x");
    TEST_CMPXCHG(32, uint32_t, "%#010RX32");
#if ARCH_BITS != 32 /* calling convention issue, skipping as it's an unsupported host */
    TEST_CMPXCHG(64, uint64_t, "%#010RX64");
#endif
}
991
992static void CmpXchg8bTest(void)
993{
994 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
995 static struct
996 {
997 const char *pszName;
998 FNIEMAIMPLCMPXCHG8B *pfn;
999 } const s_aFuncs[] =
1000 {
1001 { "cmpxchg8b", iemAImpl_cmpxchg8b },
1002 { "cmpxchg8b_locked", iemAImpl_cmpxchg8b_locked },
1003 };
1004 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
1005 {
1006 RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
1007 for (uint32_t iTest = 0; iTest < 4; iTest += 2)
1008 {
1009 uint64_t const uOldValue = RandU64();
1010 uint64_t const uNewValue = RandU64();
1011
1012 /* positive test. */
1013 RTUINT64U uA, uB;
1014 uB.u = uNewValue;
1015 uA.u = uOldValue;
1016 *g_pu64 = uOldValue;
1017 uint32_t fEflIn = RandEFlags();
1018 uint32_t fEfl = fEflIn;
1019 s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
1020 if ( fEfl != (fEflIn | X86_EFL_ZF)
1021 || *g_pu64 != uNewValue
1022 || uA.u != uOldValue)
1023 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
1024 iTest, fEflIn, uOldValue, uOldValue, uNewValue,
1025 fEfl, *g_pu64, uA.u,
1026 (fEflIn | X86_EFL_ZF), uNewValue, uOldValue, EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
1027 RTTEST_CHECK(g_hTest, uB.u == uNewValue);
1028
1029 /* negative */
1030 uint64_t const uExpect = ~uOldValue;
1031 *g_pu64 = uExpect;
1032 uA.u = uOldValue;
1033 uB.u = uNewValue;
1034 fEfl = fEflIn = RandEFlags();
1035 s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
1036 if ( fEfl != (fEflIn & ~X86_EFL_ZF)
1037 || *g_pu64 != uExpect
1038 || uA.u != uExpect)
1039 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
1040 iTest + 1, fEflIn, uExpect, uOldValue, uNewValue,
1041 fEfl, *g_pu64, uA.u,
1042 (fEflIn & ~X86_EFL_ZF), uExpect, uExpect, EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
1043 RTTEST_CHECK(g_hTest, uB.u == uNewValue);
1044 }
1045 }
1046}
1047
/**
 * Exercises the cmpxchg16b implementations (normal, locked and - except on
 * ARM64 - the C fallback), skipping on AMD64 hosts without CPUID.01h:ECX.CX16
 * when the assembly variants are in use.
 *
 * For each variant: a positive round (memory equals comparand: new value
 * stored, ZF set) and a negative round (memory differs: memory unchanged,
 * comparand loaded with memory value, ZF clear).
 */
static void CmpXchg16bTest(void)
{
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG16B,(PRTUINT128U, PRTUINT128U, PRTUINT128U, uint32_t *));
    static struct
    {
        const char *pszName;
        FNIEMAIMPLCMPXCHG16B *pfn;
    } const s_aFuncs[] =
    {
        { "cmpxchg16b", iemAImpl_cmpxchg16b },
        { "cmpxchg16b_locked", iemAImpl_cmpxchg16b_locked },
#if !defined(RT_ARCH_ARM64)
        { "cmpxchg16b_fallback", iemAImpl_cmpxchg16b_fallback },
#endif
    };
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
    {
#if !defined(IEM_WITHOUT_ASSEMBLY) && defined(RT_ARCH_AMD64)
        /* The assembly variants require host cmpxchg16b support. */
        if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16))
            continue;
#endif
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
        for (uint32_t iTest = 0; iTest < 4; iTest += 2)
        {
            RTUINT128U const uOldValue = RandU128();
            RTUINT128U const uNewValue = RandU128();

            /* positive test. */
            RTUINT128U uA, uB;
            uB = uNewValue;
            uA = uOldValue;
            *g_pu128 = uOldValue;
            uint32_t fEflIn = RandEFlags();
            uint32_t fEfl = fEflIn;
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if ( fEfl != (fEflIn | X86_EFL_ZF)
                || g_pu128->s.Lo != uNewValue.s.Lo
                || g_pu128->s.Hi != uNewValue.s.Hi
                || uA.s.Lo != uOldValue.s.Lo
                || uA.s.Hi != uOldValue.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest, fEflIn, uOldValue.s.Hi, uOldValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn | X86_EFL_ZF), uNewValue.s.Hi, uNewValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo,
                             EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);

            /* negative */
            RTUINT128U const uExpect = RTUINT128_INIT(~uOldValue.s.Hi, ~uOldValue.s.Lo);
            *g_pu128 = uExpect;
            uA = uOldValue;
            uB = uNewValue;
            fEfl = fEflIn = RandEFlags();
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if ( fEfl != (fEflIn & ~X86_EFL_ZF)
                || g_pu128->s.Lo != uExpect.s.Lo
                || g_pu128->s.Hi != uExpect.s.Hi
                || uA.s.Lo != uExpect.s.Lo
                || uA.s.Hi != uExpect.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest + 1, fEflIn, uExpect.s.Hi, uExpect.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn & ~X86_EFL_ZF), uExpect.s.Hi, uExpect.s.Lo, uExpect.s.Hi, uExpect.s.Lo,
                             EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);
        }
    }
}
1120
1121
1122/*
1123 * Double shifts.
1124 *
1125 * Note! We use BINUxx_TEST_T with the shift value in the uMisc field.
1126 */
1127
#ifndef HAVE_SHIFT_DBL_TESTS
/* Single-entry placeholder tables used when no generated test data has been
   compiled in (the generator emits HAVE_SHIFT_DBL_TESTS with real data). */
static const BINU16_TEST_T g_aTests_shrd_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64[] = { {0} };
#endif
1136
/**
 * Instantiates the subtest table, test-data generator and test driver for the
 * shld/shrd (double precision shift) helpers at the given operand width.
 * The shift count lives in the uMisc field of the test record.
 */
#define TEST_SHIFT_DBL(a_cBits, a_Type, a_Fmt) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits pfnNative; \
    BINU ## a_cBits ## _TEST_T const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} g_aShiftDblU ## a_cBits [] = \
{ \
    ENTRY(shld_u ## a_cBits), \
    ENTRY(shrd_u ## a_cBits), \
}; \
\
void ShiftDblU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aShiftDblU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aShiftDblU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = RandU ## a_cBits(); \
            Test.uMisc = RandU8() & (a_cBits - 1); \
            g_aShiftDblU ## a_cBits[iFn].pfn(&Test.uDstOut, Test.uSrcIn, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void ShiftDblU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aShiftDblU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aShiftDblU ## a_cBits[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = g_aShiftDblU ## a_cBits[iFn].paTests; \
        uint32_t const cTests = g_aShiftDblU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type uDst = paTests[iTest].uDstIn; \
            g_aShiftDblU ## a_cBits[iFn].pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].uMisc, &fEfl); \
            if ( uDst != paTests[iTest].uDstOut \
                || (fEfl /*| X86_EFL_AF*/) != (paTests[iTest].fEflOut /*| X86_EFL_AF*/)) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " src=" a_Fmt " shift=%-2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, (unsigned)paTests[iTest].uMisc, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl /*| X86_EFL_AF*/, paTests[iTest].fEflOut /*| X86_EFL_AF*/)); \
            else \
            { \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl = paTests[iTest].fEflIn; \
                g_aShiftDblU ## a_cBits[iFn].pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, paTests[iTest].uMisc, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
TEST_SHIFT_DBL(16, uint16_t, "%#06RX16")
TEST_SHIFT_DBL(32, uint32_t, "%#010RX32")
TEST_SHIFT_DBL(64, uint64_t, "%#018RX64")
1206
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Emits the HAVE_SHIFT_DBL_TESTS marker followed by the generated
 *  shld/shrd test data for all operand widths to @a pOut. */
static void ShiftDblGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_DBL_TESTS\n");
    ShiftDblU16Generate(pOut, cTests);
    ShiftDblU32Generate(pOut, cTests);
    ShiftDblU64Generate(pOut, cTests);
}
#endif
1216
/** Runs the shld/shrd tests for all operand widths. */
static void ShiftDblTest(void)
{
    ShiftDblU16Test();
    ShiftDblU32Test();
    ShiftDblU64Test();
}
1223
1224
1225/*
1226 * Unary operators.
1227 *
 * Note! We use BINUxx_TEST_T, ignoring uSrcIn and uMisc.
1229 */
1230
1231#ifndef HAVE_UNARY_TESTS
1232# define DUMMY_UNARY_TESTS(a_cBits, a_Type) \
1233 static const a_Type g_aTests_inc_u ## a_cBits[] = { {0} }; \
1234 static const a_Type g_aTests_inc_u ## a_cBits ## _locked[] = { {0} }; \
1235 static const a_Type g_aTests_dec_u ## a_cBits[] = { {0} }; \
1236 static const a_Type g_aTests_dec_u ## a_cBits ## _locked[] = { {0} }; \
1237 static const a_Type g_aTests_not_u ## a_cBits[] = { {0} }; \
1238 static const a_Type g_aTests_not_u ## a_cBits ## _locked[] = { {0} }; \
1239 static const a_Type g_aTests_neg_u ## a_cBits[] = { {0} }; \
1240 static const a_Type g_aTests_neg_u ## a_cBits ## _locked[] = { {0} }
1241DUMMY_UNARY_TESTS(8, BINU8_TEST_T);
1242DUMMY_UNARY_TESTS(16, BINU16_TEST_T);
1243DUMMY_UNARY_TESTS(32, BINU32_TEST_T);
1244DUMMY_UNARY_TESTS(64, BINU64_TEST_T);
1245#endif
1246
/**
 * Instantiates the subtest table, test-data generator and test driver for the
 * unary operators (inc/dec/not/neg, plain and locked) at the given operand
 * width.  uSrcIn and uMisc are unused and written as zero.
 */
#define TEST_UNARY(a_cBits, a_Type, a_Fmt, a_TestType) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLUNARYU ## a_cBits pfn; \
    PFNIEMAIMPLUNARYU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} g_aUnaryU ## a_cBits [] = \
{ \
    ENTRY(inc_u ## a_cBits), \
    ENTRY(inc_u ## a_cBits ## _locked), \
    ENTRY(dec_u ## a_cBits), \
    ENTRY(dec_u ## a_cBits ## _locked), \
    ENTRY(not_u ## a_cBits), \
    ENTRY(not_u ## a_cBits ## _locked), \
    ENTRY(neg_u ## a_cBits), \
    ENTRY(neg_u ## a_cBits ## _locked), \
}; \
\
void UnaryU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aUnaryU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = 0; \
            Test.uMisc = 0; \
            g_aUnaryU ## a_cBits[iFn].pfn(&Test.uDstOut, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, 0 }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void UnaryU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aUnaryU ## a_cBits[iFn].pszName); \
        a_TestType const * const paTests = g_aUnaryU ## a_cBits[iFn].paTests; \
        uint32_t const cTests = g_aUnaryU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type uDst = paTests[iTest].uDstIn; \
            g_aUnaryU ## a_cBits[iFn].pfn(&uDst, &fEfl); \
            if ( uDst != paTests[iTest].uDstOut \
                || fEfl != paTests[iTest].fEflOut) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            else \
            { \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl = paTests[iTest].fEflIn; \
                g_aUnaryU ## a_cBits[iFn].pfn(g_pu ## a_cBits, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
TEST_UNARY(8, uint8_t, "%#04RX8", BINU8_TEST_T)
TEST_UNARY(16, uint16_t, "%#06RX16", BINU16_TEST_T)
TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T)
TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T)
1323
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Emits the HAVE_UNARY_TESTS marker followed by the generated unary
 *  operator test data for all operand widths to @a pOut. */
static void UnaryGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_UNARY_TESTS\n");
    UnaryU8Generate(pOut, cTests);
    UnaryU16Generate(pOut, cTests);
    UnaryU32Generate(pOut, cTests);
    UnaryU64Generate(pOut, cTests);
}
#endif
1334
/** Runs the unary operator tests for all operand widths. */
static void UnaryTest(void)
{
    UnaryU8Test();
    UnaryU16Test();
    UnaryU32Test();
    UnaryU64Test();
}
1342
1343
1344/*
1345 * Shifts.
1346 *
1347 * Note! We use BINUxx_TEST_T with the shift count in uMisc and uSrcIn unused.
1348 */
1349
1350#ifndef HAVE_SHIFT_TESTS
1351# define DUMMY_SHIFT_TESTS(a_cBits, a_Type) \
1352 static const a_Type g_aTests_rol_u ## a_cBits[] = { {0} }; \
1353 static const a_Type g_aTests_ror_u ## a_cBits[] = { {0} }; \
1354 static const a_Type g_aTests_rcl_u ## a_cBits[] = { {0} }; \
1355 static const a_Type g_aTests_rcr_u ## a_cBits[] = { {0} }; \
1356 static const a_Type g_aTests_shl_u ## a_cBits[] = { {0} }; \
1357 static const a_Type g_aTests_shr_u ## a_cBits[] = { {0} }; \
1358 static const a_Type g_aTests_sar_u ## a_cBits[] = { {0} }
1359DUMMY_SHIFT_TESTS(8, BINU8_TEST_T);
1360DUMMY_SHIFT_TESTS(16, BINU16_TEST_T);
1361DUMMY_SHIFT_TESTS(32, BINU32_TEST_T);
1362DUMMY_SHIFT_TESTS(64, BINU64_TEST_T);
1363#endif
1364
/**
 * Instantiates the subtest table, test-data generator and test driver for the
 * shift and rotate operators (rol/ror/rcl/rcr/shl/shr/sar) at the given
 * operand width.  The shift count lives in uMisc; uSrcIn is unused (zero).
 */
#define TEST_SHIFT(a_cBits, a_Type, a_Fmt, a_TestType) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLSHIFTU ## a_cBits pfn; \
    PFNIEMAIMPLSHIFTU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} g_aShiftU ## a_cBits [] = \
{ \
    ENTRY(rol_u ## a_cBits), \
    ENTRY(ror_u ## a_cBits), \
    ENTRY(rcl_u ## a_cBits), \
    ENTRY(rcr_u ## a_cBits), \
    ENTRY(shl_u ## a_cBits), \
    ENTRY(shr_u ## a_cBits), \
    ENTRY(sar_u ## a_cBits), \
}; \
\
void ShiftU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aShiftU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aShiftU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = 0; \
            Test.uMisc = RandU8() & (a_cBits - 1); \
            g_aShiftU ## a_cBits[iFn].pfn(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, %-2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void ShiftU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aShiftU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aShiftU ## a_cBits[iFn].pszName); \
        a_TestType const * const paTests = g_aShiftU ## a_cBits[iFn].paTests; \
        uint32_t const cTests = g_aShiftU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type uDst = paTests[iTest].uDstIn; \
            g_aShiftU ## a_cBits[iFn].pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
            if ( uDst != paTests[iTest].uDstOut \
                || fEfl != paTests[iTest].fEflOut) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " shift=%2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uMisc, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            else \
            { \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl = paTests[iTest].fEflIn; \
                g_aShiftU ## a_cBits[iFn].pfn(g_pu ## a_cBits, paTests[iTest].uMisc, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
TEST_SHIFT(8, uint8_t, "%#04RX8", BINU8_TEST_T)
TEST_SHIFT(16, uint16_t, "%#06RX16", BINU16_TEST_T)
TEST_SHIFT(32, uint32_t, "%#010RX32", BINU32_TEST_T)
TEST_SHIFT(64, uint64_t, "%#018RX64", BINU64_TEST_T)
1440
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Emits the HAVE_SHIFT_TESTS marker followed by the generated shift/rotate
 *  test data for all operand widths to @a pOut. */
static void ShiftGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_TESTS\n");
    ShiftU8Generate(pOut, cTests);
    ShiftU16Generate(pOut, cTests);
    ShiftU32Generate(pOut, cTests);
    ShiftU64Generate(pOut, cTests);
}
#endif
1451
/** Runs the shift/rotate tests for all operand widths. */
static void ShiftTest(void)
{
    ShiftU8Test();
    ShiftU16Test();
    ShiftU32Test();
    ShiftU64Test();
}
1459
1460
1461/*
1462 * Multiplication and division.
1463 *
 * Note! The 8-bit functions have a different format, so we need to duplicate things.
1465 * Note! Currently ignoring undefined bits.
1466 */
1467
/* Single-entry placeholder tables for the mul/imul/div/idiv helpers, per
   CPU vendor flavour, used until generated data defines HAVE_MULDIV_TESTS_*. */
# define DUMMY_MULDIV_TESTS(a_cBits, a_Type, a_Vendor) \
    static const a_Type g_aTests_mul_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_imul_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_div_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_idiv_u ## a_cBits ## a_Vendor[] = { {0} }

#ifndef HAVE_MULDIV_TESTS_AMD
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _amd);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _amd);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _amd);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _amd);
#endif

#ifndef HAVE_MULDIV_TESTS_INTEL
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _intel);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _intel);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _intel);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _intel);
#endif
1487
/* U8 */
/** Subtest table for the 8-bit mul/imul/div/idiv helpers, one AMD and one
 *  Intel flavoured entry each.  uExtra holds the EFLAGS bits the test driver
 *  ignores when comparing results (undefined bits for the instruction). */
static const struct
{
    const char *pszName;
    PFNIEMAIMPLMULDIVU8 pfn;
    PFNIEMAIMPLMULDIVU8 pfnNative;
    MULDIVU8_TEST_T const *paTests;
    uint32_t cTests, uExtra;
    uint8_t idxCpuEflFlavour;
} g_aMulDivU8[] =
{
    ENTRY_AMD_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF,
                 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF),
    ENTRY_INTEL_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_AMD_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF,
                 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF),
    ENTRY_INTEL_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
};
1512
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Generates 8-bit mul/div test data for the subtests matching the host CPU
 *  EFLAGS flavour, running the native implementation to record results. */
static void MulDivU8Generate(PRTSTREAM pOut, uint32_t cTests)
{
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
    {
        /* Only reference results from implementations matching this host. */
        if (   g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
            && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
            continue;
        RTStrmPrintf(pOut, "static const MULDIVU8_TEST_T g_aTests_%s[] =\n{\n", g_aMulDivU8[iFn].pszName);
        for (uint32_t iTest = 0; iTest < cTests; iTest++ )
        {
            MULDIVU8_TEST_T Test;
            Test.fEflIn = RandEFlags();
            Test.fEflOut = Test.fEflIn;
            Test.uDstIn = RandU16Dst(iTest);
            Test.uDstOut = Test.uDstIn;
            Test.uSrcIn = RandU8Src(iTest);
            Test.rc = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
            RTStrmPrintf(pOut, "    { %#08x, %#08x, %#06RX16, %#06RX16, %#04RX8, %d }, /* #%u */\n",
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.rc, iTest);
        }
        RTStrmPrintf(pOut, "};\n");
    }
}
#endif
1538
1539static void MulDivU8Test(void)
1540{
1541 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
1542 {
1543 if ( g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
1544 && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
1545 continue;
1546
1547 RTTestSub(g_hTest, g_aMulDivU8[iFn].pszName);
1548 MULDIVU8_TEST_T const * const paTests = g_aMulDivU8[iFn].paTests;
1549 uint32_t const cTests = g_aMulDivU8[iFn].cTests;
1550 uint32_t const fEflIgn = g_aMulDivU8[iFn].uExtra;
1551 for (uint32_t iTest = 0; iTest < cTests; iTest++ )
1552 {
1553 uint32_t fEfl = paTests[iTest].fEflIn;
1554 uint16_t uDst = paTests[iTest].uDstIn;
1555 int rc = g_aMulDivU8[iFn].pfn(&uDst, paTests[iTest].uSrcIn, &fEfl);
1556 if ( uDst != paTests[iTest].uDstOut
1557 || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)
1558 || rc != paTests[iTest].rc)
1559 RTTestFailed(g_hTest, "#%02u: efl=%#08x dst=%#06RX16 src=%#04RX8\n"
1560 " -> efl=%#08x dst=%#06RX16 rc=%d\n"
1561 "expected %#08x %#06RX16 %d%s\n",
1562 iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn,
1563 fEfl, uDst, rc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].rc,
1564 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn));
1565 else
1566 {
1567 *g_pu16 = paTests[iTest].uDstIn;
1568 *g_pfEfl = paTests[iTest].fEflIn;
1569 rc = g_aMulDivU8[iFn].pfn(g_pu16, paTests[iTest].uSrcIn, g_pfEfl);
1570 RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
1571 RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
1572 RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
1573 }
1574 }
1575 }
1576}
1577
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Generates mul/div test data at the given operand width for the subtests
 *  matching the host EFLAGS flavour, recording results from pfnNative.
 *  Expands to nothing when the generator is not compiled in. */
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
void MulDivU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const MULDIVU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDst1In = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst1Out = Test.uDst1In; \
            Test.uDst2In = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst2Out = Test.uDst2In; \
            Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
            Test.rc = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOut, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", %d }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDst1In, Test.uDst1Out, Test.uDst2In, Test.uDst2Out, Test.uSrcIn, \
                         Test.rc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests)
#endif
1609
1610#define TEST_MULDIV(a_cBits, a_Type, a_Fmt, a_TestType, a_aSubTests) \
1611static const struct \
1612{ \
1613 const char *pszName; \
1614 PFNIEMAIMPLMULDIVU ## a_cBits pfn; \
1615 PFNIEMAIMPLMULDIVU ## a_cBits pfnNative; \
1616 a_TestType const *paTests; \
1617 uint32_t cTests, uExtra; \
1618 uint8_t idxCpuEflFlavour; \
1619} a_aSubTests [] = \
1620{ \
1621 ENTRY_AMD_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, \
1622 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF /** @todo check out AMD flags */ ), \
1623 ENTRY_INTEL_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
1624 ENTRY_AMD_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, \
1625 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF), \
1626 ENTRY_INTEL_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
1627 ENTRY_AMD_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, \
1628 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF), \
1629 ENTRY_INTEL_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
1630 ENTRY_AMD_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, \
1631 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF), \
1632 ENTRY_INTEL_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
1633}; \
1634\
1635GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
1636\
1637static void MulDivU ## a_cBits ## Test(void) \
1638{ \
1639 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
1640 { \
1641 if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
1642 && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
1643 continue; \
1644 \
1645 RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
1646 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
1647 uint32_t const cTests = a_aSubTests[iFn].cTests; \
1648 uint32_t const fEflIgn = a_aSubTests[iFn].uExtra; \
1649 PFNIEMAIMPLMULDIVU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
1650 for (uint32_t iCpu = 0; iCpu < 2 && pfn; iCpu++) \
1651 { \
1652 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
1653 { \
1654 uint32_t fEfl = paTests[iTest].fEflIn; \
1655 a_Type uDst1 = paTests[iTest].uDst1In; \
1656 a_Type uDst2 = paTests[iTest].uDst2In; \
1657 int rc = a_aSubTests[iFn].pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
1658 if ( uDst1 != paTests[iTest].uDst1Out \
1659 || uDst2 != paTests[iTest].uDst2Out \
1660 || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)\
1661 || rc != paTests[iTest].rc) \
1662 RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " src=" a_Fmt "\n" \
1663 " -> efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " rc=%d\n" \
1664 "expected %#08x " a_Fmt " " a_Fmt " %d%s -%s%s%s\n", \
1665 iTest, iCpu == 0 ? "" : "/n", \
1666 paTests[iTest].fEflIn, paTests[iTest].uDst1In, paTests[iTest].uDst2In, paTests[iTest].uSrcIn, \
1667 fEfl, uDst1, uDst2, rc, \
1668 paTests[iTest].fEflOut, paTests[iTest].uDst1Out, paTests[iTest].uDst2Out, paTests[iTest].rc, \
1669 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn), \
1670 uDst1 != paTests[iTest].uDst1Out ? " dst1" : "", uDst2 != paTests[iTest].uDst2Out ? " dst2" : "", \
1671 (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn) ? " eflags" : ""); \
1672 else \
1673 { \
1674 *g_pu ## a_cBits = paTests[iTest].uDst1In; \
1675 *g_pu ## a_cBits ## Two = paTests[iTest].uDst2In; \
1676 *g_pfEfl = paTests[iTest].fEflIn; \
1677 rc = a_aSubTests[iFn].pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, g_pfEfl); \
1678 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDst1Out); \
1679 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits ## Two == paTests[iTest].uDst2Out); \
1680 RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn)); \
1681 RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc); \
1682 } \
1683 } \
1684 pfn = a_aSubTests[iFn].pfnNative; \
1685 } \
1686 } \
1687}
1688TEST_MULDIV(16, uint16_t, "%#06RX16", MULDIVU16_TEST_T, g_aMulDivU16)
1689TEST_MULDIV(32, uint32_t, "%#010RX32", MULDIVU32_TEST_T, g_aMulDivU32)
1690TEST_MULDIV(64, uint64_t, "%#018RX64", MULDIVU64_TEST_T, g_aMulDivU64)
1691
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Emits the HAVE_MULDIV_TESTS marker (suffixed per CPU vendor via
 *  @a pszCpuSuffU) followed by the generated mul/div data for all widths. */
static void MulDivGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_MULDIV_TESTS%s\n", pszCpuSuffU);
    MulDivU8Generate(pOut, cTests);
    MulDivU16Generate(pOut, cTests);
    MulDivU32Generate(pOut, cTests);
    MulDivU64Generate(pOut, cTests);
}
#endif
1702
1703static void MulDivTest(void)
1704{
1705 MulDivU8Test();
1706 MulDivU16Test();
1707 MulDivU32Test();
1708 MulDivU64Test();
1709}
1710
1711
1712/*
1713 * BSWAP
1714 */
/**
 * Tests the iemAImpl_bswap_u16/u32/u64 helpers.
 *
 * The 16-bit case is special: the checks below expect the low word to be
 * zeroed rather than byte-swapped (NOTE(review): BSWAP with a 16-bit operand
 * is documented as undefined by the CPU vendors — presumably the IEM
 * implementation deliberately zeroes the low word; the @c #if @c 0 branches
 * keep the byte-swapped alternative for reference).
 */
static void BswapTest(void)
{
    RTTestSub(g_hTest, "bswap_u16");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    /* Byte-swapped low word, high word preserved. */
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12347856), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    /* Low word zeroed, high word preserved. */
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12340000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif
    *g_pu32 = UINT32_C(0xffff1122);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff2211), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff0000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif

    /* 32-bit: full byte reversal. */
    RTTestSub(g_hTest, "bswap_u32");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u32(g_pu32);
    RTTEST_CHECK(g_hTest, *g_pu32 == UINT32_C(0x78563412));

    /* 64-bit: full byte reversal. */
    RTTestSub(g_hTest, "bswap_u64");
    *g_pu64 = UINT64_C(0x0123456789abcdef);
    iemAImpl_bswap_u64(g_pu64);
    RTTEST_CHECK(g_hTest, *g_pu64 == UINT64_C(0xefcdab8967452301));
}
1743
1744
1745int main(int argc, char **argv)
1746{
1747 int rc = RTR3InitExe(argc, &argv, 0);
1748 if (RT_FAILURE(rc))
1749 return RTMsgInitFailure(rc);
1750
1751 /*
1752 * Determin the host CPU.
1753 */
1754#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
1755 g_idxCpuEflFlavour = ASMIsAmdCpu() || ASMIsHygonCpu()
1756 ? IEMTARGETCPU_EFL_BEHAVIOR_AMD
1757 : IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
1758#endif
1759
1760 /*
1761 * Generate data?
1762 */
1763 if (argc > 2)
1764 {
1765#ifdef TSTIEMAIMPL_WITH_GENERATOR
1766 char szCpuDesc[256] = {0};
1767 RTMpGetDescription(NIL_RTCPUID, szCpuDesc, sizeof(szCpuDesc));
1768 const char * const pszCpuType = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "Amd" : "Intel";
1769 //const char * const pszCpuTypeU = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "AMD" : "INTEL";
1770 const char * const pszCpuSuff = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_Amd" : "_Intel";
1771 //const char * const pszCpuSuffL = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_amd" : "_intel";
1772 const char * const pszCpuSuffU = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_AMD" : "_INTEL";
1773
1774 PRTSTREAM pStrmData = NULL;
1775 rc = RTStrmOpen("tstIEMAImplData.h", "w", &pStrmData);
1776 if (!pStrmData)
1777 return RTMsgErrorExitFailure("Failed to open tstIEMAImplData.h for writing: %Rrc", rc);
1778
1779 PRTSTREAM pStrmDataCpu = NULL;
1780 rc = RTStrmOpenF("w", &pStrmDataCpu, "tstIEMAImplData-%s.h", pszCpuType);
1781 if (!pStrmData)
1782 return RTMsgErrorExitFailure("Failed to open tstIEMAImplData-%s.h for writing: %Rrc", pszCpuType, rc);
1783
1784 GenerateHeader(pStrmData, szCpuDesc, NULL, "");
1785 GenerateHeader(pStrmDataCpu, szCpuDesc, pszCpuType, pszCpuSuff);
1786
1787 uint32_t cTests = 64;
1788 g_cZeroDstTests = RT_MIN(cTests / 16, 32);
1789 g_cZeroSrcTests = g_cZeroDstTests * 2;
1790
1791 BinU8Generate( pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1792 BinU16Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1793 BinU32Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1794 BinU64Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1795 ShiftDblGenerate(pStrmData, cTests);
1796 UnaryGenerate(pStrmData, cTests);
1797 ShiftGenerate(pStrmData, cTests);
1798 MulDivGenerate(pStrmDataCpu, pszCpuSuffU, cTests);
1799
1800 return GenerateFooterAndClose(pStrmDataCpu, pszCpuType, pszCpuSuff,
1801 GenerateFooterAndClose(pStrmData, NULL, "", RTEXITCODE_SUCCESS));
1802#else
1803 return RTMsgErrorExitFailure("Test data generator not compiled in!");
1804#endif
1805 }
1806
1807 /*
1808 * Do testing. Currrently disabled by default as data needs to be checked
1809 * on both intel and AMD systems first.
1810 */
1811 rc = RTTestCreate("tstIEMAimpl", &g_hTest);
1812 AssertRCReturn(rc, RTEXITCODE_FAILURE);
1813 if (argc > 1)
1814 {
1815 /* Allocate guarded memory for use in the tests. */
1816#define ALLOC_GUARDED_VAR(a_puVar) do { \
1817 rc = RTTestGuardedAlloc(g_hTest, sizeof(*a_puVar), sizeof(*a_puVar), false /*fHead*/, (void **)&a_puVar); \
1818 if (RT_FAILURE(rc)) RTTestFailed(g_hTest, "Failed to allocate guarded mem: " #a_puVar); \
1819 } while (0)
1820 ALLOC_GUARDED_VAR(g_pu8);
1821 ALLOC_GUARDED_VAR(g_pu16);
1822 ALLOC_GUARDED_VAR(g_pu32);
1823 ALLOC_GUARDED_VAR(g_pu64);
1824 ALLOC_GUARDED_VAR(g_pu128);
1825 ALLOC_GUARDED_VAR(g_pu8Two);
1826 ALLOC_GUARDED_VAR(g_pu16Two);
1827 ALLOC_GUARDED_VAR(g_pu32Two);
1828 ALLOC_GUARDED_VAR(g_pu64Two);
1829 ALLOC_GUARDED_VAR(g_pu128Two);
1830 ALLOC_GUARDED_VAR(g_pfEfl);
1831 if (RTTestErrorCount(g_hTest) == 0)
1832 {
1833 BinU8Test();
1834 BinU16Test();
1835 BinU32Test();
1836 BinU64Test();
1837 XchgTest();
1838 XaddTest();
1839 CmpXchgTest();
1840 CmpXchg8bTest();
1841 CmpXchg16bTest();
1842 ShiftDblTest();
1843 UnaryTest();
1844 ShiftTest();
1845 MulDivTest();
1846 BswapTest();
1847 }
1848 return RTTestSummaryAndDestroy(g_hTest);
1849 }
1850 return RTTestSkipAndDestroy(g_hTest, "unfinished testcase");
1851}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette