VirtualBox

source: vbox/trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp@ 94162

Last change on this file since 94162 was 94162, checked in by vboxsync, 3 years ago

VMM/IEM: Try deal with basic Intel/AMD EFLAGS difference for double shifts (intel side tests). bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 75.9 KB
Line 
1/* $Id: tstIEMAImpl.cpp 94162 2022-03-10 22:29:05Z vboxsync $ */
2/** @file
3 * IEM Assembly Instruction Helper Testcase.
4 */
5
6/*
7 * Copyright (C) 2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include "../include/IEMInternal.h"
23
24#include <iprt/errcore.h>
25#include <VBox/log.h>
26#include <iprt/assert.h>
27#include <iprt/ctype.h>
28#include <iprt/initterm.h>
29#include <iprt/message.h>
30#include <iprt/mp.h>
31#include <iprt/rand.h>
32#include <iprt/stream.h>
33#include <iprt/string.h>
34#include <iprt/test.h>
35
36
37/*********************************************************************************************************************************
38* Structures and Typedefs *
39*********************************************************************************************************************************/
/** @name 8-bit binary (PFNIEMAIMPLBINU8)
 * @{ */
/** One test vector for an 8-bit binary instruction: EFLAGS and destination
 *  before/after, plus the source operand. */
typedef struct BINU8_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint8_t uDstIn;     /**< Destination operand input. */
    uint8_t uDstOut;    /**< Expected destination operand output. */
    uint8_t uSrcIn;     /**< Source operand. */
    uint8_t uMisc;      /**< Extra info; always written as zero by the generator below. */
} BINU8_TEST_T;

/** Describes one 8-bit binary worker under test and its test vector table. */
typedef struct BINU8_T
{
    const char *pszName;            /**< Subtest name. */
    PFNIEMAIMPLBINU8 pfn;           /**< The worker being tested. */
    PFNIEMAIMPLBINU8 pfnNative;     /**< Alternative worker, or NULL; preferred by the generator and re-run by the test. */
    BINU8_TEST_T const *paTests;    /**< Test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Extra info; non-zero makes the generator mask the source to a valid bit index. */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU8_T;
/** @} */
63
64
/** @name 16-bit binary (PFNIEMAIMPLBINU16)
 * @{ */
/** One test vector for a 16-bit binary instruction: EFLAGS and destination
 *  before/after, plus the source operand. */
typedef struct BINU16_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint16_t uDstIn;    /**< Destination operand input. */
    uint16_t uDstOut;   /**< Expected destination operand output. */
    uint16_t uSrcIn;    /**< Source operand. */
    uint16_t uMisc;     /**< Extra info; always written as zero by the generator below. */
} BINU16_TEST_T;

/** Describes one 16-bit binary worker under test and its test vector table. */
typedef struct BINU16_T
{
    const char *pszName;            /**< Subtest name. */
    PFNIEMAIMPLBINU16 pfn;          /**< The worker being tested. */
    PFNIEMAIMPLBINU16 pfnNative;    /**< Alternative worker, or NULL; preferred by the generator and re-run by the test. */
    BINU16_TEST_T const *paTests;   /**< Test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Extra info; non-zero makes the generator mask the source to a valid bit index. */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU16_T;
/** @} */
88
89
/** @name 32-bit binary (PFNIEMAIMPLBINU32)
 * @{ */
/** One test vector for a 32-bit binary instruction: EFLAGS and destination
 *  before/after, plus the source operand. */
typedef struct BINU32_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint32_t uDstIn;    /**< Destination operand input. */
    uint32_t uDstOut;   /**< Expected destination operand output. */
    uint32_t uSrcIn;    /**< Source operand. */
    uint32_t uMisc;     /**< Extra info; always written as zero by the generator below. */
} BINU32_TEST_T;

/** Describes one 32-bit binary worker under test and its test vector table. */
typedef struct BINU32_T
{
    const char *pszName;            /**< Subtest name. */
    PFNIEMAIMPLBINU32 pfn;          /**< The worker being tested. */
    PFNIEMAIMPLBINU32 pfnNative;    /**< Alternative worker, or NULL; preferred by the generator and re-run by the test. */
    BINU32_TEST_T const *paTests;   /**< Test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Extra info; non-zero makes the generator mask the source to a valid bit index. */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU32_T;
/** @} */
113
114
/** @name 64-bit binary (PFNIEMAIMPLBINU64)
 * @{ */
/** One test vector for a 64-bit binary instruction: EFLAGS and destination
 *  before/after, plus the source operand. */
typedef struct BINU64_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint64_t uDstIn;    /**< Destination operand input. */
    uint64_t uDstOut;   /**< Expected destination operand output. */
    uint64_t uSrcIn;    /**< Source operand. */
    uint64_t uMisc;     /**< Extra info; always written as zero by the generator below. */
} BINU64_TEST_T;

/** Describes one 64-bit binary worker under test and its test vector table. */
typedef struct BINU64_T
{
    const char *pszName;            /**< Subtest name. */
    PFNIEMAIMPLBINU64 pfn;          /**< The worker being tested. */
    PFNIEMAIMPLBINU64 pfnNative;    /**< Alternative worker, or NULL; preferred by the generator and re-run by the test. */
    BINU64_TEST_T const *paTests;   /**< Test vector table. */
    uint32_t cTests;                /**< Number of entries in paTests. */
    uint32_t uExtra;                /**< Extra info; non-zero makes the generator mask the source to a valid bit index. */
    uint8_t idxCpuEflFlavour;       /**< IEMTARGETCPU_EFL_BEHAVIOR_XXX value. */
} BINU64_T;
/** @} */
138
139
/** @name mult/div (PFNIEMAIMPLBINU8, PFNIEMAIMPLBINU16, PFNIEMAIMPLBINU32, PFNIEMAIMPLBINU64)
 * @{ */
/** Test vector for 8-bit multiply/divide.  The destination is 16 bits wide
 *  (presumably AX, which 8-bit mul/div reads and writes - confirm against
 *  the worker prototypes, not visible in this chunk). */
typedef struct MULDIVU8_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint16_t uDstIn;    /**< 16-bit destination input. */
    uint16_t uDstOut;   /**< Expected 16-bit destination output. */
    uint8_t uSrcIn;     /**< 8-bit source operand. */
    int32_t rc;         /**< Expected status code from the worker (NOTE(review): presumably
                         *   signals \#DE on division errors - verify against the worker). */
} MULDIVU8_TEST_T;

/** Test vector for 16-bit multiply/divide with two destination registers. */
typedef struct MULDIVU16_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint16_t uDst1In;   /**< First destination input. */
    uint16_t uDst1Out;  /**< Expected first destination output. */
    uint16_t uDst2In;   /**< Second destination input. */
    uint16_t uDst2Out;  /**< Expected second destination output. */
    uint16_t uSrcIn;    /**< Source operand. */
    int32_t rc;         /**< Expected status code from the worker (see MULDIVU8_TEST_T::rc). */
} MULDIVU16_TEST_T;

/** Test vector for 32-bit multiply/divide with two destination registers. */
typedef struct MULDIVU32_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint32_t uDst1In;   /**< First destination input. */
    uint32_t uDst1Out;  /**< Expected first destination output. */
    uint32_t uDst2In;   /**< Second destination input. */
    uint32_t uDst2Out;  /**< Expected second destination output. */
    uint32_t uSrcIn;    /**< Source operand. */
    int32_t rc;         /**< Expected status code from the worker (see MULDIVU8_TEST_T::rc). */
} MULDIVU32_TEST_T;

/** Test vector for 64-bit multiply/divide with two destination registers. */
typedef struct MULDIVU64_TEST_T
{
    uint32_t fEflIn;    /**< EFLAGS input value. */
    uint32_t fEflOut;   /**< Expected EFLAGS output value. */
    uint64_t uDst1In;   /**< First destination input. */
    uint64_t uDst1Out;  /**< Expected first destination output. */
    uint64_t uDst2In;   /**< Second destination input. */
    uint64_t uDst2Out;  /**< Expected second destination output. */
    uint64_t uSrcIn;    /**< Source operand. */
    int32_t rc;         /**< Expected status code from the worker (see MULDIVU8_TEST_T::rc). */
} MULDIVU64_TEST_T;
/** @} */
188
189
190/*********************************************************************************************************************************
191* Defined Constants And Macros *
192*********************************************************************************************************************************/
/** Table entry for a worker whose EFLAGS behaviour is the same on all CPUs;
 *  it is tagged IEMTARGETCPU_EFL_BEHAVIOR_NATIVE and uses g_aTests_<name>. */
#define ENTRY(a_Name) ENTRY_EX(a_Name, 0)
#define ENTRY_EX(a_Name, a_uExtra) \
    { #a_Name, iemAImpl_ ## a_Name, NULL, \
      g_aTests_ ## a_Name, RT_ELEMENTS(g_aTests_ ## a_Name), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }

/** Table entry for the Intel EFLAGS flavour of a worker; the generic
 *  iemAImpl_<name> worker goes in the pfnNative slot.
 *  @note a_fEflUndef is not used by the expansion (documentation only). */
#define ENTRY_INTEL(a_Name, a_fEflUndef) ENTRY_INTEL_EX(a_Name, a_fEflUndef, 0)
#define ENTRY_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) \
    { #a_Name "_intel", iemAImpl_ ## a_Name ## _intel, iemAImpl_ ## a_Name, \
      g_aTests_ ## a_Name ## _intel, RT_ELEMENTS(g_aTests_ ## a_Name ## _intel), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_INTEL }

/** Table entry for the AMD EFLAGS flavour of a worker; the generic
 *  iemAImpl_<name> worker goes in the pfnNative slot.
 *  @note a_fEflUndef is not used by the expansion (documentation only). */
#define ENTRY_AMD(a_Name, a_fEflUndef) ENTRY_AMD_EX(a_Name, a_fEflUndef, 0)
#define ENTRY_AMD_EX(a_Name, a_fEflUndef, a_uExtra) \
    { #a_Name "_amd", iemAImpl_ ## a_Name ## _amd, iemAImpl_ ## a_Name, \
      g_aTests_ ## a_Name ## _amd, RT_ELEMENTS(g_aTests_ ## a_Name ## _amd), \
      a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_AMD }
210
211
212/*********************************************************************************************************************************
213* Global Variables *
214*********************************************************************************************************************************/
static RTTEST g_hTest;      /**< The test instance handle. */
/** Host CPU EFLAGS flavour; selects which Intel/AMD-specific subtests and
 *  generator outputs are used.  Defaults to Intel. */
static uint8_t g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
#ifdef TSTIEMAIMPL_WITH_GENERATOR
static uint32_t g_cZeroDstTests = 2;    /**< Number of leading generated tests forced to a zero destination. */
static uint32_t g_cZeroSrcTests = 4;    /**< Number of leading generated tests forced to a zero source. */
#endif
/* Scratch memory used to exercise the workers through pointers (allocation
   happens outside this chunk - presumably in main). */
static uint8_t *g_pu8, *g_pu8Two;
static uint16_t *g_pu16, *g_pu16Two;
static uint32_t *g_pu32, *g_pu32Two, *g_pfEfl;
static uint64_t *g_pu64, *g_pu64Two;
static RTUINT128U *g_pu128, *g_pu128Two;
226
227
228#include "tstIEMAImplData.h"
229#include "tstIEMAImplData-Intel.h"
230#include "tstIEMAImplData-Amd.h"
231
232
233/*
234 * Random helpers.
235 */
236
237static uint32_t RandEFlags(void)
238{
239 uint32_t fEfl = RTRandU32();
240 return (fEfl & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK;
241}
242
243
244static uint8_t RandU8(void)
245{
246 return RTRandU32Ex(0, 0xff);
247}
248
249
250static uint16_t RandU16(void)
251{
252 return RTRandU32Ex(0, 0xffff);
253}
254
255
/** Produces a random 32-bit value (plain pass-through to IPRT). */
static uint32_t RandU32(void)
{
    return RTRandU32();
}
260
261
/** Produces a random 64-bit value (plain pass-through to IPRT). */
static uint64_t RandU64(void)
{
    return RTRandU64();
}
266
267
268static RTUINT128U RandU128(void)
269{
270 RTUINT128U Ret;
271 Ret.s.Hi = RTRandU64();
272 Ret.s.Lo = RTRandU64();
273 return Ret;
274}
275
276#ifdef TSTIEMAIMPL_WITH_GENERATOR
277
278static uint8_t RandU8Dst(uint32_t iTest)
279{
280 if (iTest < g_cZeroDstTests)
281 return 0;
282 return RandU8();
283}
284
285
286static uint8_t RandU8Src(uint32_t iTest)
287{
288 if (iTest < g_cZeroSrcTests)
289 return 0;
290 return RandU8();
291}
292
293
294static uint16_t RandU16Dst(uint32_t iTest)
295{
296 if (iTest < g_cZeroDstTests)
297 return 0;
298 return RandU16();
299}
300
301
302static uint16_t RandU16Src(uint32_t iTest)
303{
304 if (iTest < g_cZeroSrcTests)
305 return 0;
306 return RandU16();
307}
308
309
310static uint32_t RandU32Dst(uint32_t iTest)
311{
312 if (iTest < g_cZeroDstTests)
313 return 0;
314 return RandU32();
315}
316
317
318static uint32_t RandU32Src(uint32_t iTest)
319{
320 if (iTest < g_cZeroSrcTests)
321 return 0;
322 return RandU32();
323}
324
325
326static uint64_t RandU64Dst(uint32_t iTest)
327{
328 if (iTest < g_cZeroDstTests)
329 return 0;
330 return RandU64();
331}
332
333
334static uint64_t RandU64Src(uint32_t iTest)
335{
336 if (iTest < g_cZeroSrcTests)
337 return 0;
338 return RandU64();
339}
340
341
/**
 * Writes the common file header of a generated test data header, including
 * the license block, the producing revision and the include guard opening.
 *
 * @param   pOut        The output stream.
 * @param   pszCpuDesc  CPU description string placed in the @file comment.
 * @param   pszCpuType  CPU type string for the @file comment, or NULL for the
 *                      generic data header.
 * @param   pszCpuSuffU Suffix appended to the include guard name (exact values
 *                      come from the caller, which is not visible in this chunk).
 */
static void GenerateHeader(PRTSTREAM pOut, const char *pszCpuDesc, const char *pszCpuType, const char *pszCpuSuffU)
{
    /* We want to tag the generated source code with the revision that produced it. */
    static char s_szRev[] = "$Revision: 94162 $";
    const char *pszRev = RTStrStripL(strchr(s_szRev, ':') + 1);
    size_t cchRev = 0;
    while (RT_C_IS_DIGIT(pszRev[cchRev]))
        cchRev++;

    /* NOTE(review): cchRev (size_t) feeds the '%.*s' precision below; IPRT's
       format routines presumably accept that width type - verify. */
    RTStrmPrintf(pOut,
                 "/* $Id: tstIEMAImpl.cpp 94162 2022-03-10 22:29:05Z vboxsync $ */\n"
                 "/** @file\n"
                 " * IEM Assembly Instruction Helper Testcase Data%s%s - r%.*s on %s.\n"
                 " */\n"
                 "\n"
                 "/*\n"
                 " * Copyright (C) 2022 Oracle Corporation\n"
                 " *\n"
                 " * This file is part of VirtualBox Open Source Edition (OSE), as\n"
                 " * available from http://www.virtualbox.org. This file is free software;\n"
                 " * you can redistribute it and/or modify it under the terms of the GNU\n"
                 " * General Public License (GPL) as published by the Free Software\n"
                 " * Foundation, in version 2 as it comes in the \"COPYING\" file of the\n"
                 " * VirtualBox OSE distribution. VirtualBox OSE is distributed in the\n"
                 " * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.\n"
                 " */\n"
                 "\n"
                 "#ifndef VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s_h\n"
                 "#define VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s_h\n"
                 "#ifndef RT_WITHOUT_PRAGMA_ONCE\n"
                 "# pragma once\n"
                 "#endif\n"
                 ,
                 pszCpuType ? " " : "", pszCpuType ? pszCpuType : "", cchRev, pszRev, pszCpuDesc,
                 pszCpuSuffU,
                 pszCpuSuffU);
}
379
380
381static RTEXITCODE GenerateFooterAndClose(PRTSTREAM pOut, const char *pszCpuType, const char *pszCpuSuff, RTEXITCODE rcExit)
382{
383 RTStrmPrintf(pOut,
384 "\n"
385 "#endif /* !VMM_INCLUDED_SRC_testcase_tstIEMAImplData%s_h */\n", pszCpuSuff);
386 int rc = RTStrmClose(pOut);
387 if (RT_SUCCESS(rc))
388 return rcExit;
389 return RTMsgErrorExitFailure("RTStrmClose failed on tstIEMAImplData%s%s.h: %Rrc",
390 pszCpuType ? "-" : "", pszCpuType ? pszCpuType : "", rc);
391}
392
393#endif
394
395
396/*
397 * Test helpers.
398 */
/**
 * Formats the difference between two EFLAGS values for failure messages.
 *
 * @returns "" when the values are equal; otherwise a string of the form
 *          " - <xor-mask>/FLAG/!FLAG..." where each differing flag is named,
 *          '!'-prefixed when clear in fActual.
 * @note    Returns a pointer into a static buffer - not reentrant.
 */
static const char *EFlagsDiff(uint32_t fActual, uint32_t fExpected)
{
    if (fActual == fExpected)
        return "";

    uint32_t const fXor = fActual ^ fExpected;
    static char s_szBuf[256];
    size_t cch = RTStrPrintf(s_szBuf, sizeof(s_szBuf), " - %#x", fXor);

    /* Name/value table for the individual EFLAGS bits we can report. */
    static struct
    {
        const char *pszName;
        uint32_t fFlag;
    } const s_aFlags[] =
    {
#define EFL_ENTRY(a_Flags) { #a_Flags, X86_EFL_ ## a_Flags }
        EFL_ENTRY(CF),
        EFL_ENTRY(PF),
        EFL_ENTRY(AF),
        EFL_ENTRY(ZF),
        EFL_ENTRY(SF),
        EFL_ENTRY(TF),
        EFL_ENTRY(IF),
        EFL_ENTRY(DF),
        EFL_ENTRY(OF),
        EFL_ENTRY(IOPL),
        EFL_ENTRY(NT),
        EFL_ENTRY(RF),
        EFL_ENTRY(VM),
        EFL_ENTRY(AC),
        EFL_ENTRY(VIF),
        EFL_ENTRY(VIP),
        EFL_ENTRY(ID),
    };
    for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
        if (s_aFlags[i].fFlag & fXor)
            cch += RTStrPrintf(&s_szBuf[cch], sizeof(s_szBuf) - cch,
                               s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
    RTStrPrintf(&s_szBuf[cch], sizeof(s_szBuf) - cch, ""); /* effectively a no-op; the buffer is already terminated */
    return s_szBuf;
}
440
441
442/*
443 * Binary operations.
444 */
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Defines BinU<a_cBits>Generate() which emits test value tables for all the
 * <a_cBits>-bit binary workers into the generated data headers.
 *
 * Workers tagged with a specific CPU EFLAGS flavour are written to pOutCpu
 * and only when they match the host flavour (g_idxCpuEflFlavour); all other
 * workers go to pOut.  Each vector records random inputs and the outputs
 * obtained by running the (preferably native) worker on them.
 */
# define GEN_BINARY_TESTS(a_cBits, a_Fmt) \
static void BinU ## a_cBits ## Generate(PRTSTREAM pOut, PRTSTREAM pOutCpu, const char *pszCpuSuffU, uint32_t cTests) \
{ \
    RTStrmPrintf(pOut, "\n\n#define HAVE_BINU%u_TESTS\n", a_cBits); \
    RTStrmPrintf(pOutCpu, "\n\n#define HAVE_BINU%u_TESTS%s\n", a_cBits, pszCpuSuffU); \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aBinU ## a_cBits); iFn++) \
    { \
        PFNIEMAIMPLBINU ## a_cBits const pfn = g_aBinU ## a_cBits[iFn].pfnNative \
                                             ? g_aBinU ## a_cBits[iFn].pfnNative : g_aBinU ## a_cBits[iFn].pfn; \
        PRTSTREAM pOutFn = pOut; \
        if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE) \
        { \
            if (g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
                continue; \
            pOutFn = pOutCpu; \
        } \
        \
        RTStrmPrintf(pOutFn, "static const BINU%u_TEST_T g_aTests_%s[] =\n{\n", a_cBits, g_aBinU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
            if (g_aBinU ## a_cBits[iFn].uExtra) \
                Test.uSrcIn &= a_cBits - 1; /* Restrict bit index according to operand width */ \
            Test.uMisc = 0; \
            pfn(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOutFn, "    { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %#x }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOutFn, "};\n"); \
    } \
}
#else
# define GEN_BINARY_TESTS(a_cBits, a_Fmt)
#endif
485
/**
 * Instantiates the generator (no-op without TSTIEMAIMPL_WITH_GENERATOR) and
 * defines BinU<a_cBits>Test() which validates every subtest in a_aSubTests.
 *
 * CPU-flavoured subtests are skipped unless they match g_idxCpuEflFlavour.
 * Each vector is run through the worker; on success it is re-run through the
 * global scratch buffers as well.  The whole table is then repeated with the
 * alternative worker (pfnNative) when one is present (second iCpu pass, "/n"
 * tagged in failure messages).
 */
#define TEST_BINARY_OPS(a_cBits, a_uType, a_Fmt, a_aSubTests) \
GEN_BINARY_TESTS(a_cBits, a_Fmt) \
\
static void BinU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLBINU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        for (uint32_t iCpu = 0; iCpu < 2 && pfn; iCpu++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_uType uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uSrcIn, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl != paTests[iTest].fEflOut) \
                    RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s - %s\n", \
                                 iTest, !iCpu ? "" : "/n", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut), \
                                 uDst == paTests[iTest].uDstOut ? "eflags" : fEfl == paTests[iTest].fEflOut ? "dst" : "both"); \
                else \
                { \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
528
529
530/*
531 * 8-bit binary operations.
532 */
533
#ifndef HAVE_BINU8_TESTS
/* Single all-zero placeholder tables so this file still compiles when the
   generated data header did not define HAVE_BINU8_TESTS. */
static const BINU8_TEST_T g_aTests_add_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_add_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_adc_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_adc_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_sub_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_sub_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_sbb_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_sbb_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_or_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_or_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_xor_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_xor_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_and_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_and_u8_locked[] = { {0} };
static const BINU8_TEST_T g_aTests_cmp_u8[] = { {0} };
static const BINU8_TEST_T g_aTests_test_u8[] = { {0} };
#endif
552
/** The 8-bit binary operation subtests. */
static const BINU8_T g_aBinU8[] =
{
    ENTRY(add_u8),
    ENTRY(add_u8_locked),
    ENTRY(adc_u8),
    ENTRY(adc_u8_locked),
    ENTRY(sub_u8),
    ENTRY(sub_u8_locked),
    ENTRY(sbb_u8),
    ENTRY(sbb_u8_locked),
    ENTRY(or_u8),
    ENTRY(or_u8_locked),
    ENTRY(xor_u8),
    ENTRY(xor_u8_locked),
    ENTRY(and_u8),
    ENTRY(and_u8_locked),
    ENTRY(cmp_u8),
    ENTRY(test_u8),
};

/* Instantiates BinU8Generate() (generator builds only) and BinU8Test(). */
TEST_BINARY_OPS(8, uint8_t, "%#04x", g_aBinU8)
574
575
576/*
577 * 16-bit binary operations.
578 */
579
#ifndef HAVE_BINU16_TESTS
/* Single all-zero placeholder tables so this file still compiles when the
   generated data header did not define HAVE_BINU16_TESTS. */
static const BINU16_TEST_T g_aTests_add_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_add_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_adc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sub_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_sbb_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_or_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_xor_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_and_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_cmp_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_test_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bt_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btc_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_btr_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16[] = { {0} };
static const BINU16_TEST_T g_aTests_bts_u16_locked[] = { {0} };
static const BINU16_TEST_T g_aTests_arpl[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_AMD
/* Placeholders for the AMD-flavoured data header. */
static const BINU16_TEST_T g_aTests_bsf_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_amd[] = { {0} };
#endif
#ifndef HAVE_BINU16_TESTS_INTEL
/* Placeholders for the Intel-flavoured data header. */
static const BINU16_TEST_T g_aTests_bsf_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_bsr_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_imul_two_u16_intel[] = { {0} };
#endif
616
/** The 16-bit binary operation subtests.  Bit-test entries pass uExtra=1 so
 *  the generator restricts the source operand to a valid bit index. */
static const BINU16_T g_aBinU16[] =
{
    ENTRY(add_u16),
    ENTRY(add_u16_locked),
    ENTRY(adc_u16),
    ENTRY(adc_u16_locked),
    ENTRY(sub_u16),
    ENTRY(sub_u16_locked),
    ENTRY(sbb_u16),
    ENTRY(sbb_u16_locked),
    ENTRY(or_u16),
    ENTRY(or_u16_locked),
    ENTRY(xor_u16),
    ENTRY(xor_u16_locked),
    ENTRY(and_u16),
    ENTRY(and_u16_locked),
    ENTRY(cmp_u16),
    ENTRY(test_u16),
    ENTRY_EX(bt_u16, 1),
    ENTRY_EX(btc_u16, 1),
    ENTRY_EX(btc_u16_locked, 1),
    ENTRY_EX(btr_u16, 1),
    ENTRY_EX(btr_u16_locked, 1),
    ENTRY_EX(bts_u16, 1),
    ENTRY_EX(bts_u16_locked, 1),
    ENTRY_AMD(  bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY(arpl),
};

/* Instantiates BinU16Generate() (generator builds only) and BinU16Test(). */
TEST_BINARY_OPS(16, uint16_t, "%#06x", g_aBinU16)
652
653
654/*
655 * 32-bit binary operations.
656 */
657
#ifndef HAVE_BINU32_TESTS
/* Single all-zero placeholder tables so this file still compiles when the
   generated data header did not define HAVE_BINU32_TESTS. */
static const BINU32_TEST_T g_aTests_add_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_add_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_adc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sub_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_sbb_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_or_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_xor_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_and_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_cmp_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_test_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bt_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btc_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_btr_u32_locked[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32[] = { {0} };
static const BINU32_TEST_T g_aTests_bts_u32_locked[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_AMD
/* Placeholders for the AMD-flavoured data header. */
static const BINU32_TEST_T g_aTests_bsf_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_amd[] = { {0} };
#endif
#ifndef HAVE_BINU32_TESTS_INTEL
/* Placeholders for the Intel-flavoured data header. */
static const BINU32_TEST_T g_aTests_bsf_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_bsr_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_imul_two_u32_intel[] = { {0} };
#endif
693
/** The 32-bit binary operation subtests.  Bit-test entries pass uExtra=1 so
 *  the generator restricts the source operand to a valid bit index. */
static const BINU32_T g_aBinU32[] =
{
    ENTRY(add_u32),
    ENTRY(add_u32_locked),
    ENTRY(adc_u32),
    ENTRY(adc_u32_locked),
    ENTRY(sub_u32),
    ENTRY(sub_u32_locked),
    ENTRY(sbb_u32),
    ENTRY(sbb_u32_locked),
    ENTRY(or_u32),
    ENTRY(or_u32_locked),
    ENTRY(xor_u32),
    ENTRY(xor_u32_locked),
    ENTRY(and_u32),
    ENTRY(and_u32_locked),
    ENTRY(cmp_u32),
    ENTRY(test_u32),
    ENTRY_EX(bt_u32, 1),
    ENTRY_EX(btc_u32, 1),
    ENTRY_EX(btc_u32_locked, 1),
    ENTRY_EX(btr_u32, 1),
    ENTRY_EX(btr_u32_locked, 1),
    ENTRY_EX(bts_u32, 1),
    ENTRY_EX(bts_u32_locked, 1),
    ENTRY_AMD(  bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

/* Instantiates BinU32Generate() (generator builds only) and BinU32Test(). */
TEST_BINARY_OPS(32, uint32_t, "%#010RX32", g_aBinU32)
728
729
730/*
731 * 64-bit binary operations.
732 */
733
#ifndef HAVE_BINU64_TESTS
/* Single all-zero placeholder tables so this file still compiles when the
   generated data header did not define HAVE_BINU64_TESTS. */
static const BINU64_TEST_T g_aTests_add_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_add_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_adc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sub_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_sbb_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_or_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_xor_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_and_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_cmp_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_test_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bt_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btc_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_btr_u64_locked[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64[] = { {0} };
static const BINU64_TEST_T g_aTests_bts_u64_locked[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_AMD
/* Placeholders for the AMD-flavoured data header. */
static const BINU64_TEST_T g_aTests_bsf_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_amd[] = { {0} };
#endif
#ifndef HAVE_BINU64_TESTS_INTEL
/* Placeholders for the Intel-flavoured data header. */
static const BINU64_TEST_T g_aTests_bsf_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_bsr_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_imul_two_u64_intel[] = { {0} };
#endif
769
/** The 64-bit binary operation subtests.  Bit-test entries pass uExtra=1 so
 *  the generator restricts the source operand to a valid bit index. */
static const BINU64_T g_aBinU64[] =
{
    ENTRY(add_u64),
    ENTRY(add_u64_locked),
    ENTRY(adc_u64),
    ENTRY(adc_u64_locked),
    ENTRY(sub_u64),
    ENTRY(sub_u64_locked),
    ENTRY(sbb_u64),
    ENTRY(sbb_u64_locked),
    ENTRY(or_u64),
    ENTRY(or_u64_locked),
    ENTRY(xor_u64),
    ENTRY(xor_u64_locked),
    ENTRY(and_u64),
    ENTRY(and_u64_locked),
    ENTRY(cmp_u64),
    ENTRY(test_u64),
    ENTRY_EX(bt_u64, 1),
    ENTRY_EX(btc_u64, 1),
    ENTRY_EX(btc_u64_locked, 1),
    ENTRY_EX(btr_u64, 1),
    ENTRY_EX(btr_u64_locked, 1),
    ENTRY_EX(bts_u64, 1),
    ENTRY_EX(bts_u64_locked, 1),
    ENTRY_AMD(  bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_INTEL(bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
    ENTRY_AMD(  imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
    ENTRY_INTEL(imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
};

/* Instantiates BinU64Generate() (generator builds only) and BinU64Test(). */
TEST_BINARY_OPS(64, uint64_t, "%#018RX64", g_aBinU64)
804
805
806/*
807 * XCHG
808 */
809static void XchgTest(void)
810{
811 RTTestSub(g_hTest, "xchg");
812 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
813 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
814 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
815 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU64,(uint64_t *pu64Mem, uint64_t *pu64Reg));
816
817 static struct
818 {
819 uint8_t cb; uint64_t fMask;
820 union
821 {
822 uintptr_t pfn;
823 FNIEMAIMPLXCHGU8 *pfnU8;
824 FNIEMAIMPLXCHGU16 *pfnU16;
825 FNIEMAIMPLXCHGU32 *pfnU32;
826 FNIEMAIMPLXCHGU64 *pfnU64;
827 } u;
828 }
829 s_aXchgWorkers[] =
830 {
831 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_locked } },
832 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_locked } },
833 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_locked } },
834 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_locked } },
835 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_unlocked } },
836 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_unlocked } },
837 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_unlocked } },
838 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_unlocked } },
839 };
840 for (size_t i = 0; i < RT_ELEMENTS(s_aXchgWorkers); i++)
841 {
842 RTUINT64U uIn1, uIn2, uMem, uDst;
843 uMem.u = uIn1.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
844 uDst.u = uIn2.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
845 if (uIn1.u == uIn2.u)
846 uDst.u = uIn2.u = ~uIn2.u;
847
848 switch (s_aXchgWorkers[i].cb)
849 {
850 case 1:
851 s_aXchgWorkers[i].u.pfnU8(g_pu8, g_pu8Two);
852 s_aXchgWorkers[i].u.pfnU8(&uMem.au8[0], &uDst.au8[0]);
853 break;
854 case 2:
855 s_aXchgWorkers[i].u.pfnU16(g_pu16, g_pu16Two);
856 s_aXchgWorkers[i].u.pfnU16(&uMem.Words.w0, &uDst.Words.w0);
857 break;
858 case 4:
859 s_aXchgWorkers[i].u.pfnU32(g_pu32, g_pu32Two);
860 s_aXchgWorkers[i].u.pfnU32(&uMem.DWords.dw0, &uDst.DWords.dw0);
861 break;
862 case 8:
863 s_aXchgWorkers[i].u.pfnU64(g_pu64, g_pu64Two);
864 s_aXchgWorkers[i].u.pfnU64(&uMem.u, &uDst.u);
865 break;
866 default: RTTestFailed(g_hTest, "%d\n", s_aXchgWorkers[i].cb); break;
867 }
868
869 if (uMem.u != uIn2.u || uDst.u != uIn1.u)
870 RTTestFailed(g_hTest, "i=%u: %#RX64, %#RX64 -> %#RX64, %#RX64\n", i, uIn1.u, uIn2.u, uMem.u, uDst.u);
871 }
872}
873
874
875/*
876 * XADD
877 */
878static void XaddTest(void)
879{
880#define TEST_XADD(a_cBits, a_Type, a_Fmt) do { \
881 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXADDU ## a_cBits, (a_Type *, a_Type *, uint32_t *)); \
882 static struct \
883 { \
884 const char *pszName; \
885 FNIEMAIMPLXADDU ## a_cBits *pfn; \
886 BINU ## a_cBits ## _TEST_T const *paTests; \
887 uint32_t cTests; \
888 } const s_aFuncs[] = \
889 { \
890 { "xadd_u" # a_cBits, iemAImpl_xadd_u ## a_cBits, \
891 g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
892 { "xadd_u" # a_cBits "8_locked", iemAImpl_xadd_u ## a_cBits ## _locked, \
893 g_aTests_add_u ## a_cBits, RT_ELEMENTS(g_aTests_add_u ## a_cBits) }, \
894 }; \
895 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
896 { \
897 RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
898 BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
899 uint32_t const cTests = s_aFuncs[iFn].cTests; \
900 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
901 { \
902 uint32_t fEfl = paTests[iTest].fEflIn; \
903 a_Type uSrc = paTests[iTest].uSrcIn; \
904 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
905 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uSrc, &fEfl); \
906 if ( fEfl != paTests[iTest].fEflOut \
907 || *g_pu ## a_cBits != paTests[iTest].uDstOut \
908 || uSrc != paTests[iTest].uDstIn) \
909 RTTestFailed(g_hTest, "%s/#%u: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt " src=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
910 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
911 fEfl, *g_pu ## a_cBits, uSrc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].uDstIn, \
912 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
913 } \
914 } \
915 } while(0)
916 TEST_XADD(8, uint8_t, "%#04x");
917 TEST_XADD(16, uint16_t, "%#06x");
918 TEST_XADD(32, uint32_t, "%#010RX32");
919 TEST_XADD(64, uint64_t, "%#010RX64");
920}
921
922
923/*
924 * CMPXCHG
925 */
926
/**
 * Tests iemAImpl_cmpxchg_uXX (plain and locked variants) for all widths.
 *
 * Reuses the pre-generated 'cmp' binary-op test data.  For the successful
 * exchange case the expected EFLAGS are computed on the fly by running the
 * corresponding iemAImpl_sub_uXX on equal operands (cmpxchg sets flags like
 * a cmp/sub of accumulator vs destination).
 */
static void CmpXchgTest(void)
{
#define TEST_CMPXCHG(a_cBits, a_Type, a_Fmt) do {\
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHGU ## a_cBits, (a_Type *, a_Type *, a_Type, uint32_t *)); \
    static struct \
    { \
        const char *pszName; \
        FNIEMAIMPLCMPXCHGU ## a_cBits *pfn; \
        PFNIEMAIMPLBINU ## a_cBits pfnSub;  /* used to compute expected EFLAGS for the match case */ \
        BINU ## a_cBits ## _TEST_T const *paTests; \
        uint32_t cTests; \
    } const s_aFuncs[] = \
    { \
        { "cmpxchg_u" # a_cBits, iemAImpl_cmpxchg_u ## a_cBits, iemAImpl_sub_u ## a_cBits, \
          g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
        { "cmpxchg_u" # a_cBits "_locked", iemAImpl_cmpxchg_u ## a_cBits ## _locked, iemAImpl_sub_u ## a_cBits, \
          g_aTests_cmp_u ## a_cBits, RT_ELEMENTS(g_aTests_cmp_u ## a_cBits) }, \
    }; \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
    { \
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
        uint32_t const cTests = s_aFuncs[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++) \
        { \
            /* as is (99% likely to be negative). */ \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type const uNew = paTests[iTest].uSrcIn + 0x42; \
            a_Type uA = paTests[iTest].uDstIn; \
            *g_pu ## a_cBits = paTests[iTest].uSrcIn; \
            /* Exchange happens only if accumulator equals memory (uDstIn == uSrcIn). */ \
            a_Type const uExpect = uA != paTests[iTest].uSrcIn ? paTests[iTest].uSrcIn : uNew; \
            s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
            if (   fEfl != paTests[iTest].fEflOut \
                || *g_pu ## a_cBits != uExpect \
                || uA != paTests[iTest].uSrcIn) \
                RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                             s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uSrcIn, paTests[iTest].uDstIn, \
                             uNew, fEfl, *g_pu ## a_cBits, uA, paTests[iTest].fEflOut, uExpect, paTests[iTest].uSrcIn, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            /* positive */ \
            /* Compute the expected flags with a sub of equal values, then run \
               cmpxchg with matching memory/accumulator so the exchange happens. */ \
            uint32_t fEflExpect = paTests[iTest].fEflIn; \
            uA = paTests[iTest].uDstIn; \
            s_aFuncs[iFn].pfnSub(&uA, uA, &fEflExpect); \
            fEfl = paTests[iTest].fEflIn; \
            uA = paTests[iTest].uDstIn; \
            *g_pu ## a_cBits = uA; \
            s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
            if (   fEfl != fEflExpect \
                || *g_pu ## a_cBits != uNew \
                || uA != paTests[iTest].uDstIn) \
                RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
                             s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uDstIn, \
                             uNew, fEfl, *g_pu ## a_cBits, uA, fEflExpect, uNew, paTests[iTest].uDstIn, \
                             EFlagsDiff(fEfl, fEflExpect)); \
        } \
    } \
    } while(0)
    TEST_CMPXCHG(8, uint8_t, "%#04RX8");
    TEST_CMPXCHG(16, uint16_t, "%#06x");
    TEST_CMPXCHG(32, uint32_t, "%#010RX32");
#if ARCH_BITS != 32 /* calling convension issue, skipping as it's an unsupported host */
    TEST_CMPXCHG(64, uint64_t, "%#010RX64");
#endif
}
991
992static void CmpXchg8bTest(void)
993{
994 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
995 static struct
996 {
997 const char *pszName;
998 FNIEMAIMPLCMPXCHG8B *pfn;
999 } const s_aFuncs[] =
1000 {
1001 { "cmpxchg8b", iemAImpl_cmpxchg8b },
1002 { "cmpxchg8b_locked", iemAImpl_cmpxchg8b_locked },
1003 };
1004 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
1005 {
1006 RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
1007 for (uint32_t iTest = 0; iTest < 4; iTest += 2)
1008 {
1009 uint64_t const uOldValue = RandU64();
1010 uint64_t const uNewValue = RandU64();
1011
1012 /* positive test. */
1013 RTUINT64U uA, uB;
1014 uB.u = uNewValue;
1015 uA.u = uOldValue;
1016 *g_pu64 = uOldValue;
1017 uint32_t fEflIn = RandEFlags();
1018 uint32_t fEfl = fEflIn;
1019 s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
1020 if ( fEfl != (fEflIn | X86_EFL_ZF)
1021 || *g_pu64 != uNewValue
1022 || uA.u != uOldValue)
1023 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
1024 iTest, fEflIn, uOldValue, uOldValue, uNewValue,
1025 fEfl, *g_pu64, uA.u,
1026 (fEflIn | X86_EFL_ZF), uNewValue, uOldValue, EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
1027 RTTEST_CHECK(g_hTest, uB.u == uNewValue);
1028
1029 /* negative */
1030 uint64_t const uExpect = ~uOldValue;
1031 *g_pu64 = uExpect;
1032 uA.u = uOldValue;
1033 uB.u = uNewValue;
1034 fEfl = fEflIn = RandEFlags();
1035 s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
1036 if ( fEfl != (fEflIn & ~X86_EFL_ZF)
1037 || *g_pu64 != uExpect
1038 || uA.u != uExpect)
1039 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
1040 iTest + 1, fEflIn, uExpect, uOldValue, uNewValue,
1041 fEfl, *g_pu64, uA.u,
1042 (fEflIn & ~X86_EFL_ZF), uExpect, uExpect, EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
1043 RTTEST_CHECK(g_hTest, uB.u == uNewValue);
1044 }
1045 }
1046}
1047
/**
 * Tests iemAImpl_cmpxchg16b (plain, locked and - on non-ARM64 hosts - the
 * C fallback) with random 128-bit values.
 *
 * Like CmpXchg8bTest: a positive (exchange + ZF) and a negative (no
 * exchange, ZF clear) case per iteration.
 */
static void CmpXchg16bTest(void)
{
    typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG16B,(PRTUINT128U, PRTUINT128U, PRTUINT128U, uint32_t *));
    static struct
    {
        const char *pszName;
        FNIEMAIMPLCMPXCHG16B *pfn;
    } const s_aFuncs[] =
    {
        { "cmpxchg16b", iemAImpl_cmpxchg16b },
        { "cmpxchg16b_locked", iemAImpl_cmpxchg16b_locked },
#if !defined(RT_ARCH_ARM64)
        { "cmpxchg16b_fallback", iemAImpl_cmpxchg16b_fallback },
#endif
    };
    for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
    {
#if !defined(IEM_WITHOUT_ASSEMBLY) && defined(RT_ARCH_AMD64)
        /* Skip the assembly variants if the host CPU lacks CMPXCHG16B. */
        if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16))
            continue;
#endif
        RTTestSub(g_hTest, s_aFuncs[iFn].pszName);
        for (uint32_t iTest = 0; iTest < 4; iTest += 2)
        {
            RTUINT128U const uOldValue = RandU128();
            RTUINT128U const uNewValue = RandU128();

            /* positive test. */
            RTUINT128U uA, uB;
            uB = uNewValue;
            uA = uOldValue;
            *g_pu128 = uOldValue;
            uint32_t fEflIn = RandEFlags();
            uint32_t fEfl = fEflIn;
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if (   fEfl != (fEflIn | X86_EFL_ZF)
                || g_pu128->s.Lo != uNewValue.s.Lo
                || g_pu128->s.Hi != uNewValue.s.Hi
                || uA.s.Lo != uOldValue.s.Lo
                || uA.s.Hi != uOldValue.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest, fEflIn, uOldValue.s.Hi, uOldValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn | X86_EFL_ZF), uNewValue.s.Hi, uNewValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo,
                             EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);

            /* negative */
            RTUINT128U const uExpect = RTUINT128_INIT(~uOldValue.s.Hi, ~uOldValue.s.Lo);
            *g_pu128 = uExpect;
            uA = uOldValue;
            uB = uNewValue;
            fEfl = fEflIn = RandEFlags();
            s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
            if (   fEfl != (fEflIn & ~X86_EFL_ZF)
                || g_pu128->s.Lo != uExpect.s.Lo
                || g_pu128->s.Hi != uExpect.s.Hi
                || uA.s.Lo != uExpect.s.Lo
                || uA.s.Hi != uExpect.s.Hi)
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
                                      " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
                                      " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
                             iTest + 1, fEflIn, uExpect.s.Hi, uExpect.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
                             fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
                             (fEflIn & ~X86_EFL_ZF), uExpect.s.Hi, uExpect.s.Lo, uExpect.s.Hi, uExpect.s.Lo,
                             EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
            RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);
        }
    }
}
1120
1121
1122/*
1123 * Double shifts.
1124 *
1125 * Note! We use BINUxx_TEST_T with the shift value in the uMisc field.
1126 */
1127
/* Placeholder double-shift test tables: a single all-zero entry keeps the
   test code compilable when no pre-generated data for the given CPU vendor
   flavour has been included (see the HAVE_SHIFT_DBL_TESTS_* defines emitted
   by ShiftDblGenerate). */
#ifndef HAVE_SHIFT_DBL_TESTS_AMD
static const BINU16_TEST_T g_aTests_shrd_u16_amd[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32_amd[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64_amd[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64_amd[] = { {0} };
#endif
#ifndef HAVE_SHIFT_DBL_TESTS_INTEL
static const BINU16_TEST_T g_aTests_shrd_u16_intel[] = { {0} };
static const BINU16_TEST_T g_aTests_shld_u16_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_shrd_u32_intel[] = { {0} };
static const BINU32_TEST_T g_aTests_shld_u32_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_shrd_u64_intel[] = { {0} };
static const BINU64_TEST_T g_aTests_shld_u64_intel[] = { {0} };
#endif
1144
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Emits a ShiftDblUxxGenerate function producing double-shift (shld/shrd)
 * test data for the sub-tests matching the host's EFLAGS flavour.
 *
 * The shift count is stored in the uMisc member of BINUxx_TEST_T.
 */
# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests) \
void ShiftDblU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            BINU ## a_cBits ## _TEST_T Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
            /* The CPU masks the count mod 32 (mod 64 for 64-bit operands), so \
               16-bit operands can see counts all the way up to 31 rounds. */ \
            Test.uMisc = RandU8() & (a_cBits == 16 ? 31 : a_cBits - 1); \
            a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", %2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests)
#endif
1174
/**
 * Instantiates the double-shift (shld/shrd) sub-test table, generator and
 * test function for one operand width.  BINUxx_TEST_T is reused with the
 * shift count in the uMisc member.  When a native assembly variant exists,
 * each sub-test runs twice: first the selected implementation, then the
 * native one (iCpu == 1, flagged "/n" in failure messages).
 */
#define TEST_SHIFT_DBL(a_cBits, a_Type, a_Fmt, a_aSubTests) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn; \
    PFNIEMAIMPLSHIFTDBLU ## a_cBits pfnNative; \
    BINU ## a_cBits ## _TEST_T const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} a_aSubTests[] = \
{ \
    ENTRY_AMD(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_INTEL(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_AMD(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
    ENTRY_INTEL(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
}; \
\
GEN_SHIFT_DBL(a_cBits, a_Fmt, a_aSubTests) \
\
static void ShiftDblU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        /* Only run sub-tests recorded for this host's EFLAGS flavour. */ \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        \
        RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
        BINU ## a_cBits ## _TEST_T const * const paTests = a_aSubTests[iFn].paTests; \
        uint32_t const cTests = a_aSubTests[iFn].cTests; \
        PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
        for (uint32_t iCpu = 0; iCpu < 2 && pfn; iCpu++) \
        { \
            for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
            { \
                uint32_t fEfl = paTests[iTest].fEflIn; \
                a_Type uDst = paTests[iTest].uDstIn; \
                pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].uMisc, &fEfl); \
                if (   uDst != paTests[iTest].uDstOut \
                    || fEfl!= paTests[iTest].fEflOut) \
                    RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " shift=%-2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                                 iTest, iCpu == 0 ? "" : "/n", paTests[iTest].fEflIn, \
                                 paTests[iTest].uDstIn, paTests[iTest].uSrcIn, (unsigned)paTests[iTest].uMisc, \
                                 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                                 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
                else \
                { \
                    /* Re-run via the g_puXX/g_pfEfl globals (presumably a \
                       specially allocated buffer - see their definitions). */ \
                    *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                    *g_pfEfl = paTests[iTest].fEflIn; \
                    pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, paTests[iTest].uMisc, g_pfEfl); \
                    RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                    RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
                } \
            } \
            /* Second round exercises the native (assembly) implementation. */ \
            pfn = a_aSubTests[iFn].pfnNative; \
        } \
    } \
}
TEST_SHIFT_DBL(16, uint16_t, "%#06RX16", g_aShiftDblU16)
TEST_SHIFT_DBL(32, uint32_t, "%#010RX32", g_aShiftDblU32)
TEST_SHIFT_DBL(64, uint64_t, "%#018RX64", g_aShiftDblU64)
1236
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Generates double-shift test data for all widths, preceded by the
 * HAVE_SHIFT_DBL_TESTS<suffix> marker define that enables the data.
 */
static void ShiftDblGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_DBL_TESTS%s\n", pszCpuSuffU);
    ShiftDblU16Generate(pOut, cTests);
    ShiftDblU32Generate(pOut, cTests);
    ShiftDblU64Generate(pOut, cTests);
}
#endif
1246
/** Runs the double-shift tests for all operand widths. */
static void ShiftDblTest(void)
{
    ShiftDblU16Test();
    ShiftDblU32Test();
    ShiftDblU64Test();
}
1253
1254
1255/*
1256 * Unary operators.
1257 *
1258 * Note! We use BINUxx_TEST_T ignoreing uSrcIn and uMisc.
1259 */
1260
#ifndef HAVE_UNARY_TESTS
/* Placeholder unary-op test tables (single all-zero entry) used when no
   pre-generated data is included; see UnaryGenerate/HAVE_UNARY_TESTS. */
# define DUMMY_UNARY_TESTS(a_cBits, a_Type) \
    static const a_Type g_aTests_inc_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_inc_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_dec_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_dec_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_not_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_not_u ## a_cBits ## _locked[] = { {0} }; \
    static const a_Type g_aTests_neg_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_neg_u ## a_cBits ## _locked[] = { {0} }
DUMMY_UNARY_TESTS(8, BINU8_TEST_T);
DUMMY_UNARY_TESTS(16, BINU16_TEST_T);
DUMMY_UNARY_TESTS(32, BINU32_TEST_T);
DUMMY_UNARY_TESTS(64, BINU64_TEST_T);
#endif
1276
/**
 * Instantiates the sub-test table, generator and test function for the unary
 * operators (inc/dec/not/neg, plain and locked) of one operand width.
 * BINUxx_TEST_T is reused with uSrcIn and uMisc unused (always zero).
 */
#define TEST_UNARY(a_cBits, a_Type, a_Fmt, a_TestType) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLUNARYU ## a_cBits pfn; \
    PFNIEMAIMPLUNARYU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} g_aUnaryU ## a_cBits [] = \
{ \
    ENTRY(inc_u ## a_cBits), \
    ENTRY(inc_u ## a_cBits ## _locked), \
    ENTRY(dec_u ## a_cBits), \
    ENTRY(dec_u ## a_cBits ## _locked), \
    ENTRY(not_u ## a_cBits), \
    ENTRY(not_u ## a_cBits ## _locked), \
    ENTRY(neg_u ## a_cBits), \
    ENTRY(neg_u ## a_cBits ## _locked), \
}; \
\
void UnaryU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aUnaryU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = 0; \
            Test.uMisc = 0; \
            g_aUnaryU ## a_cBits[iFn].pfn(&Test.uDstOut, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, 0 }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void UnaryU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aUnaryU ## a_cBits[iFn].pszName); \
        a_TestType const * const paTests = g_aUnaryU ## a_cBits[iFn].paTests; \
        uint32_t const cTests = g_aUnaryU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type uDst = paTests[iTest].uDstIn; \
            g_aUnaryU ## a_cBits[iFn].pfn(&uDst, &fEfl); \
            if (   uDst != paTests[iTest].uDstOut \
                || fEfl != paTests[iTest].fEflOut) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            else \
            { \
                /* Re-run via the g_puXX/g_pfEfl globals on success. */ \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl = paTests[iTest].fEflIn; \
                g_aUnaryU ## a_cBits[iFn].pfn(g_pu ## a_cBits, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
TEST_UNARY(8, uint8_t, "%#04RX8", BINU8_TEST_T)
TEST_UNARY(16, uint16_t, "%#06RX16", BINU16_TEST_T)
TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T)
TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T)
1353
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Generates unary-op test data for all widths plus the enabling define. */
static void UnaryGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_UNARY_TESTS\n");
    UnaryU8Generate(pOut, cTests);
    UnaryU16Generate(pOut, cTests);
    UnaryU32Generate(pOut, cTests);
    UnaryU64Generate(pOut, cTests);
}
#endif
1364
/** Runs the unary operator tests for all operand widths. */
static void UnaryTest(void)
{
    UnaryU8Test();
    UnaryU16Test();
    UnaryU32Test();
    UnaryU64Test();
}
1372
1373
1374/*
1375 * Shifts.
1376 *
1377 * Note! We use BINUxx_TEST_T with the shift count in uMisc and uSrcIn unused.
1378 */
1379
#ifndef HAVE_SHIFT_TESTS
/* Placeholder shift/rotate test tables (single all-zero entry) used when no
   pre-generated data is included; see ShiftGenerate/HAVE_SHIFT_TESTS. */
# define DUMMY_SHIFT_TESTS(a_cBits, a_Type) \
    static const a_Type g_aTests_rol_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_ror_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_rcl_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_rcr_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_shl_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_shr_u ## a_cBits[] = { {0} }; \
    static const a_Type g_aTests_sar_u ## a_cBits[] = { {0} }
DUMMY_SHIFT_TESTS(8, BINU8_TEST_T);
DUMMY_SHIFT_TESTS(16, BINU16_TEST_T);
DUMMY_SHIFT_TESTS(32, BINU32_TEST_T);
DUMMY_SHIFT_TESTS(64, BINU64_TEST_T);
#endif
1394
/**
 * Instantiates the sub-test table, generator and test function for the
 * shift/rotate operators (rol/ror/rcl/rcr/shl/shr/sar) of one operand
 * width.  BINUxx_TEST_T is reused with the shift count in uMisc and uSrcIn
 * unused.
 */
#define TEST_SHIFT(a_cBits, a_Type, a_Fmt, a_TestType) \
static const struct \
{ \
    const char *pszName; \
    PFNIEMAIMPLSHIFTU ## a_cBits pfn; \
    PFNIEMAIMPLSHIFTU ## a_cBits pfnNative; \
    a_TestType const *paTests; \
    uint32_t cTests, uExtra; \
    uint8_t idxCpuEflFlavour; \
} g_aShiftU ## a_cBits [] = \
{ \
    ENTRY(rol_u ## a_cBits), \
    ENTRY(ror_u ## a_cBits), \
    ENTRY(rcl_u ## a_cBits), \
    ENTRY(rcr_u ## a_cBits), \
    ENTRY(shl_u ## a_cBits), \
    ENTRY(shr_u ## a_cBits), \
    ENTRY(sar_u ## a_cBits), \
}; \
\
void ShiftU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aShiftU ## a_cBits); iFn++) \
    { \
        RTStrmPrintf(pOut, "static const BINU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", g_aShiftU ## a_cBits[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDstIn = RandU ## a_cBits(); \
            Test.uDstOut = Test.uDstIn; \
            Test.uSrcIn = 0; \
            Test.uMisc = RandU8() & (a_cBits - 1); \
            g_aShiftU ## a_cBits[iFn].pfn(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", 0, %-2u }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uMisc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
} \
\
static void ShiftU ## a_cBits ## Test(void) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aShiftU ## a_cBits); iFn++) \
    { \
        RTTestSub(g_hTest, g_aShiftU ## a_cBits[iFn].pszName); \
        a_TestType const * const paTests = g_aShiftU ## a_cBits[iFn].paTests; \
        uint32_t const cTests = g_aShiftU ## a_cBits[iFn].cTests; \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            uint32_t fEfl = paTests[iTest].fEflIn; \
            a_Type uDst = paTests[iTest].uDstIn; \
            g_aShiftU ## a_cBits[iFn].pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
            if (   uDst != paTests[iTest].uDstOut \
                || fEfl != paTests[iTest].fEflOut) \
                RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " shift=%2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uMisc, \
                             fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
                             EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
            else \
            { \
                /* Re-run via the g_puXX/g_pfEfl globals on success. */ \
                *g_pu ## a_cBits = paTests[iTest].uDstIn; \
                *g_pfEfl = paTests[iTest].fEflIn; \
                g_aShiftU ## a_cBits[iFn].pfn(g_pu ## a_cBits, paTests[iTest].uMisc, g_pfEfl); \
                RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
                RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
            } \
        } \
    } \
}
TEST_SHIFT(8, uint8_t, "%#04RX8", BINU8_TEST_T)
TEST_SHIFT(16, uint16_t, "%#06RX16", BINU16_TEST_T)
TEST_SHIFT(32, uint32_t, "%#010RX32", BINU32_TEST_T)
TEST_SHIFT(64, uint64_t, "%#018RX64", BINU64_TEST_T)
1470
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/** Generates shift/rotate test data for all widths plus the enabling define. */
static void ShiftGenerate(PRTSTREAM pOut, uint32_t cTests)
{
    RTStrmPrintf(pOut, "\n\n#define HAVE_SHIFT_TESTS\n");
    ShiftU8Generate(pOut, cTests);
    ShiftU16Generate(pOut, cTests);
    ShiftU32Generate(pOut, cTests);
    ShiftU64Generate(pOut, cTests);
}
#endif
1481
/** Runs the shift/rotate tests for all operand widths. */
static void ShiftTest(void)
{
    ShiftU8Test();
    ShiftU16Test();
    ShiftU32Test();
    ShiftU64Test();
}
1489
1490
1491/*
1492 * Multiplication and division.
1493 *
1494 * Note! The 8-bit functions has a different format, so we need to duplicate things.
1495 * Note! Currently ignoring undefined bits.
1496 */
1497
/* Placeholder mul/div test tables (single all-zero entry) used when no
   pre-generated data for the given CPU vendor flavour is included; see
   MulDivGenerate/HAVE_MULDIV_TESTS_*. */
# define DUMMY_MULDIV_TESTS(a_cBits, a_Type, a_Vendor) \
    static const a_Type g_aTests_mul_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_imul_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_div_u ## a_cBits ## a_Vendor[] = { {0} }; \
    static const a_Type g_aTests_idiv_u ## a_cBits ## a_Vendor[] = { {0} }

#ifndef HAVE_MULDIV_TESTS_AMD
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _amd);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _amd);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _amd);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _amd);
#endif

#ifndef HAVE_MULDIV_TESTS_INTEL
DUMMY_MULDIV_TESTS(8, MULDIVU8_TEST_T, _intel);
DUMMY_MULDIV_TESTS(16, MULDIVU16_TEST_T, _intel);
DUMMY_MULDIV_TESTS(32, MULDIVU32_TEST_T, _intel);
DUMMY_MULDIV_TESTS(64, MULDIVU64_TEST_T, _intel);
#endif
1517
/* U8 */
/** Sub-test table for the 8-bit mul/imul/div/idiv helpers.  The ENTRY_*_EX
 * second argument is stored in uExtra and used by MulDivU8Test as the set of
 * EFLAGS bits to ignore when comparing results (fEflIgn). */
static const struct
{
    const char *pszName;
    PFNIEMAIMPLMULDIVU8 pfn;
    PFNIEMAIMPLMULDIVU8 pfnNative;
    MULDIVU8_TEST_T const *paTests;
    uint32_t cTests, uExtra;
    uint8_t idxCpuEflFlavour;
} g_aMulDivU8[] =
{
    ENTRY_AMD_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                         X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
                          X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
    ENTRY_INTEL_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
    ENTRY_AMD_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF,
                         X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF),
    ENTRY_INTEL_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
    ENTRY_AMD_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF,
                          X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF),
    ENTRY_INTEL_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
};
1542
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Generates 8-bit mul/div test data for the sub-tests matching the host's
 * EFLAGS flavour.  The 8-bit variants use a 16-bit destination (AX), hence
 * RandU16Dst for uDstIn.
 */
static void MulDivU8Generate(PRTSTREAM pOut, uint32_t cTests)
{
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
    {
        if (   g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
            && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
            continue;
        RTStrmPrintf(pOut, "static const MULDIVU8_TEST_T g_aTests_%s[] =\n{\n", g_aMulDivU8[iFn].pszName);
        for (uint32_t iTest = 0; iTest < cTests; iTest++ )
        {
            MULDIVU8_TEST_T Test;
            Test.fEflIn = RandEFlags();
            Test.fEflOut = Test.fEflIn;
            Test.uDstIn = RandU16Dst(iTest);
            Test.uDstOut = Test.uDstIn;
            Test.uSrcIn = RandU8Src(iTest);
            Test.rc = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
            RTStrmPrintf(pOut, " { %#08x, %#08x, %#06RX16, %#06RX16, %#04RX8, %d }, /* #%u */\n",
                         Test.fEflIn, Test.fEflOut, Test.uDstIn, Test.uDstOut, Test.uSrcIn, Test.rc, iTest);
        }
        RTStrmPrintf(pOut, "};\n");
    }
}
#endif
1568
/**
 * Tests the 8-bit mul/imul/div/idiv helpers against pre-generated data,
 * ignoring the EFLAGS bits listed in uExtra (fEflIgn - typically the bits
 * the instruction leaves undefined).
 */
static void MulDivU8Test(void)
{
    for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
    {
        /* Only run sub-tests recorded for this host's EFLAGS flavour. */
        if (   g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
            && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
            continue;

        RTTestSub(g_hTest, g_aMulDivU8[iFn].pszName);
        MULDIVU8_TEST_T const * const paTests = g_aMulDivU8[iFn].paTests;
        uint32_t const cTests = g_aMulDivU8[iFn].cTests;
        uint32_t const fEflIgn = g_aMulDivU8[iFn].uExtra;
        for (uint32_t iTest = 0; iTest < cTests; iTest++ )
        {
            uint32_t fEfl = paTests[iTest].fEflIn;
            uint16_t uDst = paTests[iTest].uDstIn;  /* 8-bit mul/div operate on AX */
            int rc = g_aMulDivU8[iFn].pfn(&uDst, paTests[iTest].uSrcIn, &fEfl);
            if (   uDst != paTests[iTest].uDstOut
                || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)
                || rc != paTests[iTest].rc)
                RTTestFailed(g_hTest, "#%02u: efl=%#08x dst=%#06RX16 src=%#04RX8\n"
                                      " -> efl=%#08x dst=%#06RX16 rc=%d\n"
                                      "expected %#08x %#06RX16 %d%s\n",
                             iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn,
                             fEfl, uDst, rc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].rc,
                             EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn));
            else
            {
                /* Re-run via the g_pu16/g_pfEfl globals on success. */
                *g_pu16 = paTests[iTest].uDstIn;
                *g_pfEfl = paTests[iTest].fEflIn;
                rc = g_aMulDivU8[iFn].pfn(g_pu16, paTests[iTest].uSrcIn, g_pfEfl);
                RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
                RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
                RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
            }
        }
    }
}
1607
#ifdef TSTIEMAIMPL_WITH_GENERATOR
/**
 * Emits a MulDivUxxGenerate function producing test data for the 16/32/64-bit
 * mul/div helpers, which take two destination operands (e.g. DX:AX / EDX:EAX /
 * RDX:RAX) plus a source operand and return a status code.
 */
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
void MulDivU ## a_cBits ## Generate(PRTSTREAM pOut, uint32_t cTests) \
{ \
    for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
    { \
        if (   a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
            && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
            continue; \
        RTStrmPrintf(pOut, "static const MULDIVU" #a_cBits "_TEST_T g_aTests_%s[] =\n{\n", a_aSubTests[iFn].pszName); \
        for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
        { \
            a_TestType Test; \
            Test.fEflIn = RandEFlags(); \
            Test.fEflOut = Test.fEflIn; \
            Test.uDst1In = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst1Out = Test.uDst1In; \
            Test.uDst2In = RandU ## a_cBits ## Dst(iTest); \
            Test.uDst2Out = Test.uDst2In; \
            Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
            Test.rc = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
            RTStrmPrintf(pOut, " { %#08x, %#08x, " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", " a_Fmt ", %d }, /* #%u */\n", \
                         Test.fEflIn, Test.fEflOut, Test.uDst1In, Test.uDst1Out, Test.uDst2In, Test.uDst2Out, Test.uSrcIn, \
                         Test.rc, iTest); \
        } \
        RTStrmPrintf(pOut, "};\n"); \
    } \
}
#else
# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests)
#endif
1639
1640#define TEST_MULDIV(a_cBits, a_Type, a_Fmt, a_TestType, a_aSubTests) \
1641static const struct \
1642{ \
1643 const char *pszName; \
1644 PFNIEMAIMPLMULDIVU ## a_cBits pfn; \
1645 PFNIEMAIMPLMULDIVU ## a_cBits pfnNative; \
1646 a_TestType const *paTests; \
1647 uint32_t cTests, uExtra; \
1648 uint8_t idxCpuEflFlavour; \
1649} a_aSubTests [] = \
1650{ \
1651 ENTRY_AMD_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, \
1652 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF /** @todo check out AMD flags */ ), \
1653 ENTRY_INTEL_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
1654 ENTRY_AMD_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, \
1655 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF), \
1656 ENTRY_INTEL_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
1657 ENTRY_AMD_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, \
1658 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF), \
1659 ENTRY_INTEL_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
1660 ENTRY_AMD_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, \
1661 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF), \
1662 ENTRY_INTEL_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
1663}; \
1664\
1665GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
1666\
1667static void MulDivU ## a_cBits ## Test(void) \
1668{ \
1669 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
1670 { \
1671 if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
1672 && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
1673 continue; \
1674 \
1675 RTTestSub(g_hTest, a_aSubTests[iFn].pszName); \
1676 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
1677 uint32_t const cTests = a_aSubTests[iFn].cTests; \
1678 uint32_t const fEflIgn = a_aSubTests[iFn].uExtra; \
1679 PFNIEMAIMPLMULDIVU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
1680 for (uint32_t iCpu = 0; iCpu < 2 && pfn; iCpu++) \
1681 { \
1682 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
1683 { \
1684 uint32_t fEfl = paTests[iTest].fEflIn; \
1685 a_Type uDst1 = paTests[iTest].uDst1In; \
1686 a_Type uDst2 = paTests[iTest].uDst2In; \
1687 int rc = a_aSubTests[iFn].pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
1688 if ( uDst1 != paTests[iTest].uDst1Out \
1689 || uDst2 != paTests[iTest].uDst2Out \
1690 || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)\
1691 || rc != paTests[iTest].rc) \
1692 RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " src=" a_Fmt "\n" \
1693 " -> efl=%#08x dst1=" a_Fmt " dst2=" a_Fmt " rc=%d\n" \
1694 "expected %#08x " a_Fmt " " a_Fmt " %d%s -%s%s%s\n", \
1695 iTest, iCpu == 0 ? "" : "/n", \
1696 paTests[iTest].fEflIn, paTests[iTest].uDst1In, paTests[iTest].uDst2In, paTests[iTest].uSrcIn, \
1697 fEfl, uDst1, uDst2, rc, \
1698 paTests[iTest].fEflOut, paTests[iTest].uDst1Out, paTests[iTest].uDst2Out, paTests[iTest].rc, \
1699 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn), \
1700 uDst1 != paTests[iTest].uDst1Out ? " dst1" : "", uDst2 != paTests[iTest].uDst2Out ? " dst2" : "", \
1701 (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn) ? " eflags" : ""); \
1702 else \
1703 { \
1704 *g_pu ## a_cBits = paTests[iTest].uDst1In; \
1705 *g_pu ## a_cBits ## Two = paTests[iTest].uDst2In; \
1706 *g_pfEfl = paTests[iTest].fEflIn; \
1707 rc = a_aSubTests[iFn].pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, g_pfEfl); \
1708 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDst1Out); \
1709 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits ## Two == paTests[iTest].uDst2Out); \
1710 RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn)); \
1711 RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc); \
1712 } \
1713 } \
1714 pfn = a_aSubTests[iFn].pfnNative; \
1715 } \
1716 } \
1717}
/* Instantiate the per-width sub-test table tails, data generators and
   MulDivU<bits>Test() workers via the TEST_MULDIV macro above.  The 8-bit
   variant is defined separately earlier in the file (NOTE(review): presumably
   because its test record layout differs — confirm against MULDIVU8_TEST_T). */
TEST_MULDIV(16, uint16_t, "%#06RX16", MULDIVU16_TEST_T, g_aMulDivU16)
TEST_MULDIV(32, uint32_t, "%#010RX32", MULDIVU32_TEST_T, g_aMulDivU32)
TEST_MULDIV(64, uint64_t, "%#018RX64", MULDIVU64_TEST_T, g_aMulDivU64)
1721
1722#ifdef TSTIEMAIMPL_WITH_GENERATOR
1723static void MulDivGenerate(PRTSTREAM pOut, const char *pszCpuSuffU, uint32_t cTests)
1724{
1725 RTStrmPrintf(pOut, "\n\n#define HAVE_MULDIV_TESTS%s\n", pszCpuSuffU);
1726 MulDivU8Generate(pOut, cTests);
1727 MulDivU16Generate(pOut, cTests);
1728 MulDivU32Generate(pOut, cTests);
1729 MulDivU64Generate(pOut, cTests);
1730}
1731#endif
1732
1733static void MulDivTest(void)
1734{
1735 MulDivU8Test();
1736 MulDivU16Test();
1737 MulDivU32Test();
1738 MulDivU64Test();
1739}
1740
1741
1742/*
1743 * BSWAP
1744 */
/**
 * Tests the BSWAP helpers for 16, 32 and 64-bit operands.
 *
 * The operands are accessed via the guarded allocations (g_pu32/g_pu64) so
 * out-of-bounds accesses by the assembly helpers are caught.
 */
static void BswapTest(void)
{
    RTTestSub(g_hTest, "bswap_u16");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    /* Alternative result: a plain byte swap of the low word (0x5678 -> 0x7856). */
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12347856), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    /* The expected values here show the helper zeroing the low word instead of
       swapping it; BSWAP with a 16-bit operand is architecturally undefined.
       NOTE(review): which behavior matches real CPUs should be confirmed. */
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12340000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif
    /* Second pattern: verify the high word is left untouched either way. */
    *g_pu32 = UINT32_C(0xffff1122);
    iemAImpl_bswap_u16(g_pu32);
#if 0
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff2211), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#else
    RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff0000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
#endif

    RTTestSub(g_hTest, "bswap_u32");
    *g_pu32 = UINT32_C(0x12345678);
    iemAImpl_bswap_u32(g_pu32);
    RTTEST_CHECK(g_hTest, *g_pu32 == UINT32_C(0x78563412));

    RTTestSub(g_hTest, "bswap_u64");
    *g_pu64 = UINT64_C(0x0123456789abcdef);
    iemAImpl_bswap_u64(g_pu64);
    RTTEST_CHECK(g_hTest, *g_pu64 == UINT64_C(0xefcdab8967452301));
}
1773
1774
1775int main(int argc, char **argv)
1776{
1777 int rc = RTR3InitExe(argc, &argv, 0);
1778 if (RT_FAILURE(rc))
1779 return RTMsgInitFailure(rc);
1780
1781 /*
1782 * Determin the host CPU.
1783 */
1784#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
1785 g_idxCpuEflFlavour = ASMIsAmdCpu() || ASMIsHygonCpu()
1786 ? IEMTARGETCPU_EFL_BEHAVIOR_AMD
1787 : IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
1788#endif
1789
1790 /*
1791 * Generate data?
1792 */
1793 if (argc > 2)
1794 {
1795#ifdef TSTIEMAIMPL_WITH_GENERATOR
1796 char szCpuDesc[256] = {0};
1797 RTMpGetDescription(NIL_RTCPUID, szCpuDesc, sizeof(szCpuDesc));
1798 const char * const pszCpuType = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "Amd" : "Intel";
1799 //const char * const pszCpuTypeU = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "AMD" : "INTEL";
1800 const char * const pszCpuSuff = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_Amd" : "_Intel";
1801 //const char * const pszCpuSuffL = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_amd" : "_intel";
1802 const char * const pszCpuSuffU = g_idxCpuEflFlavour == IEMTARGETCPU_EFL_BEHAVIOR_AMD ? "_AMD" : "_INTEL";
1803
1804 PRTSTREAM pStrmData = NULL;
1805 rc = RTStrmOpen("tstIEMAImplData.h", "w", &pStrmData);
1806 if (!pStrmData)
1807 return RTMsgErrorExitFailure("Failed to open tstIEMAImplData.h for writing: %Rrc", rc);
1808
1809 PRTSTREAM pStrmDataCpu = NULL;
1810 rc = RTStrmOpenF("w", &pStrmDataCpu, "tstIEMAImplData-%s.h", pszCpuType);
1811 if (!pStrmData)
1812 return RTMsgErrorExitFailure("Failed to open tstIEMAImplData-%s.h for writing: %Rrc", pszCpuType, rc);
1813
1814 GenerateHeader(pStrmData, szCpuDesc, NULL, "");
1815 GenerateHeader(pStrmDataCpu, szCpuDesc, pszCpuType, pszCpuSuff);
1816
1817 uint32_t cTests = 64;
1818 g_cZeroDstTests = RT_MIN(cTests / 16, 32);
1819 g_cZeroSrcTests = g_cZeroDstTests * 2;
1820
1821 BinU8Generate( pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1822 BinU16Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1823 BinU32Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1824 BinU64Generate(pStrmData, pStrmDataCpu, pszCpuSuffU, cTests);
1825 ShiftDblGenerate(pStrmDataCpu, pszCpuSuffU, RT_MIN(cTests, 128));
1826 UnaryGenerate(pStrmData, cTests);
1827 ShiftGenerate(pStrmData, cTests);
1828 MulDivGenerate(pStrmDataCpu, pszCpuSuffU, cTests);
1829
1830 return GenerateFooterAndClose(pStrmDataCpu, pszCpuType, pszCpuSuff,
1831 GenerateFooterAndClose(pStrmData, NULL, "", RTEXITCODE_SUCCESS));
1832#else
1833 return RTMsgErrorExitFailure("Test data generator not compiled in!");
1834#endif
1835 }
1836
1837 /*
1838 * Do testing. Currrently disabled by default as data needs to be checked
1839 * on both intel and AMD systems first.
1840 */
1841 rc = RTTestCreate("tstIEMAimpl", &g_hTest);
1842 AssertRCReturn(rc, RTEXITCODE_FAILURE);
1843 if (argc > 1)
1844 {
1845 /* Allocate guarded memory for use in the tests. */
1846#define ALLOC_GUARDED_VAR(a_puVar) do { \
1847 rc = RTTestGuardedAlloc(g_hTest, sizeof(*a_puVar), sizeof(*a_puVar), false /*fHead*/, (void **)&a_puVar); \
1848 if (RT_FAILURE(rc)) RTTestFailed(g_hTest, "Failed to allocate guarded mem: " #a_puVar); \
1849 } while (0)
1850 ALLOC_GUARDED_VAR(g_pu8);
1851 ALLOC_GUARDED_VAR(g_pu16);
1852 ALLOC_GUARDED_VAR(g_pu32);
1853 ALLOC_GUARDED_VAR(g_pu64);
1854 ALLOC_GUARDED_VAR(g_pu128);
1855 ALLOC_GUARDED_VAR(g_pu8Two);
1856 ALLOC_GUARDED_VAR(g_pu16Two);
1857 ALLOC_GUARDED_VAR(g_pu32Two);
1858 ALLOC_GUARDED_VAR(g_pu64Two);
1859 ALLOC_GUARDED_VAR(g_pu128Two);
1860 ALLOC_GUARDED_VAR(g_pfEfl);
1861 if (RTTestErrorCount(g_hTest) == 0)
1862 {
1863 BinU8Test();
1864 BinU16Test();
1865 BinU32Test();
1866 BinU64Test();
1867 XchgTest();
1868 XaddTest();
1869 CmpXchgTest();
1870 CmpXchg8bTest();
1871 CmpXchg16bTest();
1872 ShiftDblTest();
1873 UnaryTest();
1874 ShiftTest();
1875 MulDivTest();
1876 BswapTest();
1877 }
1878 return RTTestSummaryAndDestroy(g_hTest);
1879 }
1880 return RTTestSkipAndDestroy(g_hTest, "unfinished testcase");
1881}
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette