VirtualBox

source: vbox/trunk/src/VBox/VMM/testcase/tstIEMAImpl.cpp@103407

Last change on this file since 103407 was 103100, checked in by vboxsync, 10 months ago

tstIEMAImpl,VMM/IEM: Regenerated integer tests on intel, increasing the number to 1024 entries per tests. Fixed some issues. [build fix] bugref:9898 bugref:10591

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 484.8 KB
1/* $Id: tstIEMAImpl.cpp 103100 2024-01-26 23:36:34Z vboxsync $ */
2/** @file
3 * IEM Assembly Instruction Helper Testcase.
4 */
5
6/*
7 * Copyright (C) 2022-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#include "../include/IEMInternal.h"
33
34#include <iprt/errcore.h>
35#include <VBox/log.h>
36#include <iprt/assert.h>
37#include <iprt/buildconfig.h>
38#include <iprt/ctype.h>
39#include <iprt/err.h>
40#include <iprt/getopt.h>
41#include <iprt/initterm.h>
42#include <iprt/file.h>
43#include <iprt/mem.h>
44#include <iprt/message.h>
45#include <iprt/mp.h>
46#include <iprt/rand.h>
47#include <iprt/stream.h>
48#include <iprt/string.h>
49#include <iprt/test.h>
50#include <iprt/time.h>
51#include <iprt/thread.h>
52#include <iprt/vfs.h>
53#include <iprt/zip.h>
54#include <VBox/version.h>
55
56#include "tstIEMAImpl.h"
57
58
59/*********************************************************************************************************************************
60* Defined Constants And Macros *
61*********************************************************************************************************************************/
62#define ENTRY_BIN_FIX(a_Name) ENTRY_BIN_FIX_EX(a_Name, 0)
63#ifdef TSTIEMAIMPL_WITH_GENERATOR
64# define ENTRY_BIN_FIX_EX(a_Name, a_uExtra) \
65 { RT_XSTR(a_Name), iemAImpl_ ## a_Name, NULL, \
66 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
67 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */, \
68 RT_ELEMENTS(g_aFixedTests_ ## a_Name), g_aFixedTests_ ## a_Name }
69#else
70# define ENTRY_BIN_FIX_EX(a_Name, a_uExtra) ENTRY_BIN_EX(a_Name, a_uExtra)
71#endif
72
73#define ENTRY_BIN_PFN_CAST(a_Name, a_pfnType) ENTRY_BIN_PFN_CAST_EX(a_Name, a_pfnType, 0)
74#define ENTRY_BIN_PFN_CAST_EX(a_Name, a_pfnType, a_uExtra) \
75 { RT_XSTR(a_Name), (a_pfnType)iemAImpl_ ## a_Name, NULL, \
76 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
77 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
78
79#define ENTRY_BIN(a_Name) ENTRY_BIN_EX(a_Name, 0)
80#define ENTRY_BIN_EX(a_Name, a_uExtra) \
81 { RT_XSTR(a_Name), iemAImpl_ ## a_Name, NULL, \
82 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
83 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
84
85#define ENTRY_BIN_AVX(a_Name) ENTRY_BIN_AVX_EX(a_Name, 0)
86#ifndef IEM_WITHOUT_ASSEMBLY
87# define ENTRY_BIN_AVX_EX(a_Name, a_uExtra) \
88 { RT_XSTR(a_Name), iemAImpl_ ## a_Name, NULL, \
89 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
90 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
91#else
92# define ENTRY_BIN_AVX_EX(a_Name, a_uExtra) \
93 { RT_XSTR(a_Name), iemAImpl_ ## a_Name ## _fallback, NULL, \
94 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
95 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
96#endif
97
98#define ENTRY_BIN_SSE_OPT(a_Name) ENTRY_BIN_SSE_OPT_EX(a_Name, 0)
99#ifndef IEM_WITHOUT_ASSEMBLY
100# define ENTRY_BIN_SSE_OPT_EX(a_Name, a_uExtra) \
101 { RT_XSTR(a_Name), iemAImpl_ ## a_Name, NULL, \
102 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
103 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
104#else
105# define ENTRY_BIN_SSE_OPT_EX(a_Name, a_uExtra) \
106 { RT_XSTR(a_Name), iemAImpl_ ## a_Name ## _fallback, NULL, \
107 g_abTests_ ## a_Name, &g_cbTests_ ## a_Name, \
108 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_NATIVE /* means same for all here */ }
109#endif
110
111#define ENTRY_BIN_INTEL(a_Name, a_fEflUndef) ENTRY_BIN_INTEL_EX(a_Name, a_fEflUndef, 0)
112#define ENTRY_BIN_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) \
113 { RT_XSTR(a_Name) "_intel", iemAImpl_ ## a_Name ## _intel, iemAImpl_ ## a_Name, \
114 g_abTests_ ## a_Name ## _intel, &g_cbTests_ ## a_Name ## _intel, \
115 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_INTEL }
116
117#define ENTRY_BIN_AMD(a_Name, a_fEflUndef) ENTRY_BIN_AMD_EX(a_Name, a_fEflUndef, 0)
118#define ENTRY_BIN_AMD_EX(a_Name, a_fEflUndef, a_uExtra) \
119 { RT_XSTR(a_Name) "_amd", iemAImpl_ ## a_Name ## _amd, iemAImpl_ ## a_Name, \
120 g_abTests_ ## a_Name ## _amd, &g_cbTests_ ## a_Name ## _amd, \
121 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_AMD }
122
123#define ENTRY_BIN_FIX_INTEL(a_Name, a_fEflUndef) ENTRY_BIN_FIX_INTEL_EX(a_Name, a_fEflUndef, 0)
124#ifdef TSTIEMAIMPL_WITH_GENERATOR
125# define ENTRY_BIN_FIX_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) \
126 { RT_XSTR(a_Name) "_intel", iemAImpl_ ## a_Name ## _intel, iemAImpl_ ## a_Name, \
127 g_abTests_ ## a_Name ## _intel, &g_cbTests_ ## a_Name ## _intel, \
128 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_INTEL, \
129 RT_ELEMENTS(g_aFixedTests_ ## a_Name), g_aFixedTests_ ## a_Name }
130#else
131# define ENTRY_BIN_FIX_INTEL_EX(a_Name, a_fEflUndef, a_uExtra) ENTRY_BIN_INTEL_EX(a_Name, a_fEflUndef, a_uExtra)
132#endif
133
134#define ENTRY_BIN_FIX_AMD(a_Name, a_fEflUndef) ENTRY_BIN_FIX_AMD_EX(a_Name, a_fEflUndef, 0)
135#ifdef TSTIEMAIMPL_WITH_GENERATOR
136# define ENTRY_BIN_FIX_AMD_EX(a_Name, a_fEflUndef, a_uExtra) \
137 { RT_XSTR(a_Name) "_amd", iemAImpl_ ## a_Name ## _amd, iemAImpl_ ## a_Name, \
138 g_abTests_ ## a_Name ## _amd, &g_cbTests_ ## a_Name ## _amd, \
139 a_uExtra, IEMTARGETCPU_EFL_BEHAVIOR_AMD, \
140 RT_ELEMENTS(g_aFixedTests_ ## a_Name), g_aFixedTests_ ## a_Name }
141#else
142# define ENTRY_BIN_FIX_AMD_EX(a_Name, a_fEflUndef, a_uExtra) ENTRY_BIN_AMD_EX(a_Name, a_fEflUndef, a_uExtra)
143#endif
144
145
146#define TYPEDEF_SUBTEST_TYPE(a_TypeName, a_TestType, a_FunctionPtrType) \
147 typedef struct a_TypeName \
148 { \
149 const char *pszName; \
150 const a_FunctionPtrType pfn; \
151 const a_FunctionPtrType pfnNative; \
152 void const * const pvCompressedTests; \
153 uint32_t const *pcbCompressedTests; \
154 uint32_t const uExtra; \
155 uint8_t const idxCpuEflFlavour; \
156 uint16_t const cFixedTests; \
157 a_TestType const * const paFixedTests; \
158 a_TestType const *paTests; /**< The decompressed info. */ \
159 uint32_t cTests; /**< The decompressed info. */ \
160 IEMTESTENTRYINFO Info; \
161 } a_TypeName
162
163#define COUNT_VARIATIONS(a_SubTest) \
164 (1 + ((a_SubTest).idxCpuEflFlavour == g_idxCpuEflFlavour && (a_SubTest).pfnNative) )
165
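/*
 * Illustrative sketch (not from the original sources; the worker name
 * 'add_u8' is assumed purely for illustration): how TYPEDEF_SUBTEST_TYPE and
 * the ENTRY_BIN* macros above combine into a subtest table.  The real tables
 * (such as g_aBinU8, referenced by the GEN_BINARY_TESTS generator code
 * further down) follow the same pattern.
 */
#if 0
TYPEDEF_SUBTEST_TYPE(BINU8_T, BINU8_TEST_T, PFNIEMAIMPLBINU8);

static BINU8_T g_aBinU8[] =
{
    ENTRY_BIN(add_u8),           /* -> { "add_u8", iemAImpl_add_u8, NULL, g_abTests_add_u8, &g_cbTests_add_u8, 0, ... } */
    ENTRY_BIN_INTEL(add_u8, 0),  /* "_intel" variant; pfnNative = iemAImpl_add_u8 is the reference implementation */
    ENTRY_BIN_AMD(  add_u8, 0),  /* "_amd" variant */
};
/* COUNT_VARIATIONS(g_aBinU8[1]) yields 2 when the host EFLAGS flavour is Intel (pfnNative is set), else 1. */
#endif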
166
167/*********************************************************************************************************************************
168* Structures and Typedefs *
169*********************************************************************************************************************************/
170typedef struct IEMBINARYHEADER
171{
172 char szMagic[16];
173 uint32_t cbEntry;
174 uint32_t uSvnRev;
175 uint32_t auUnused[6];
176 char szCpuDesc[80];
177} IEMBINARYHEADER;
178AssertCompileSize(IEMBINARYHEADER, 128);
179
180 // 01234567890123456
181#define IEMBINARYHEADER_MAGIC "IEMAImpl Bin v1"
182AssertCompile(sizeof(IEMBINARYHEADER_MAGIC) == 16);
183
184
185typedef struct IEMBINARYFOOTER
186{
187 char szMagic[24];
188 uint32_t cbEntry;
189 uint32_t cEntries;
190} IEMBINARYFOOTER;
191AssertCompileSize(IEMBINARYFOOTER, 32);
192 // 012345678901234567890123
193#define IEMBINARYFOOTER_MAGIC "\nIEMAImpl Bin Footer v1"
194AssertCompile(sizeof(IEMBINARYFOOTER_MAGIC) == 24);
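/*
 * Layout sketch (not from the original sources; CalcEntryCount is a
 * hypothetical helper): the gzip-compressed test data blobs written by
 * GenerateBinaryOpen/Close below consist of an optional 128-byte header, a
 * run of fixed-size test records and, when a header was written, a 32-byte
 * footer.  The entry count can be recovered from the uncompressed size, as
 * DecompressBinaryTest does further down.
 */
#if 0
/* [IEMBINARYHEADER: 128 bytes][cEntries * cbEntry bytes of test records][IEMBINARYFOOTER: 32 bytes] */
static uint32_t CalcEntryCount(size_t cbUncompressed, uint32_t cbEntry)
{
    Assert(cbUncompressed >= sizeof(IEMBINARYHEADER) + sizeof(IEMBINARYFOOTER));
    return (uint32_t)((cbUncompressed - sizeof(IEMBINARYHEADER) - sizeof(IEMBINARYFOOTER)) / cbEntry);
}
#endif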
195
196
197/** Fixed part of TYPEDEF_SUBTEST_TYPE and friends. */
198typedef struct IEMTESTENTRYINFO
199{
200 void *pvUncompressed;
201 uint32_t cbUncompressed;
202 const char *pszCpuDesc;
203 uint32_t uSvnRev;
204} IEMTESTENTRYINFO;
205
206
207#ifdef TSTIEMAIMPL_WITH_GENERATOR
208typedef struct IEMBINARYOUTPUT
209{
210 /** The output file. */
211 RTVFSFILE hVfsFile;
212 /** The stream we write uncompressed binary test data to. */
213 RTVFSIOSTREAM hVfsUncompressed;
214 /** The number of bytes written (ignoring write failures). */
215 size_t cbWritten;
216 /** The entry size. */
217 uint32_t cbEntry;
218 /** Write status. */
219 int rcWrite;
 220 /** Set if the filename format was NULL, i.e. output is discarded. */
221 bool fNull;
222 /** Set if we wrote a header and should write a footer as well. */
223 bool fWroteHeader;
224 /** Filename. */
225 char szFilename[94];
226} IEMBINARYOUTPUT;
227typedef IEMBINARYOUTPUT *PIEMBINARYOUTPUT;
228#endif /* TSTIEMAIMPL_WITH_GENERATOR */
229
230
231/*********************************************************************************************************************************
232* Global Variables *
233*********************************************************************************************************************************/
234static RTTEST g_hTest;
235static uint8_t g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
236#ifdef TSTIEMAIMPL_WITH_GENERATOR
237static uint32_t g_cZeroDstTests = 2;
238static uint32_t g_cZeroSrcTests = 4;
239#endif
240static uint8_t *g_pu8, *g_pu8Two;
241static uint16_t *g_pu16, *g_pu16Two;
242static uint32_t *g_pu32, *g_pu32Two, *g_pfEfl;
243static uint64_t *g_pu64, *g_pu64Two;
244static RTUINT128U *g_pu128, *g_pu128Two;
245
246static char g_aszBuf[32][256];
247static unsigned g_idxBuf = 0;
248
249static uint32_t g_cIncludeTestPatterns;
250static uint32_t g_cExcludeTestPatterns;
251static const char *g_apszIncludeTestPatterns[64];
252static const char *g_apszExcludeTestPatterns[64];
253
 254/** Higher value means longer benchmarking. */
255static uint64_t g_cPicoSecBenchmark = 0;
256
257static unsigned g_cVerbosity = 0;
258
259
260#ifdef TSTIEMAIMPL_WITH_GENERATOR
261/** The SVN revision (for use in the binary headers). */
262static uint32_t g_uSvnRev = 0;
263/** The CPU description (for use in the binary headers). */
264static char g_szCpuDesc[80] = "";
265#endif
266
267
268/*********************************************************************************************************************************
269* Internal Functions *
270*********************************************************************************************************************************/
271static const char *FormatR80(PCRTFLOAT80U pr80);
272static const char *FormatR64(PCRTFLOAT64U pr64);
273static const char *FormatR32(PCRTFLOAT32U pr32);
274
275
276/*
277 * Random helpers.
278 */
279
280static uint32_t RandEFlags(void)
281{
282 uint32_t fEfl = RTRandU32();
283 return (fEfl & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK;
284}
285
286#ifdef TSTIEMAIMPL_WITH_GENERATOR
287
288static uint8_t RandU8(void)
289{
290 return RTRandU32Ex(0, 0xff);
291}
292
293
294static uint16_t RandU16(void)
295{
296 return RTRandU32Ex(0, 0xffff);
297}
298
299
300static uint32_t RandU32(void)
301{
302 return RTRandU32();
303}
304
305#endif
306
307static uint64_t RandU64(void)
308{
309 return RTRandU64();
310}
311
312
313static RTUINT128U RandU128(void)
314{
315 RTUINT128U Ret;
316 Ret.s.Hi = RTRandU64();
317 Ret.s.Lo = RTRandU64();
318 return Ret;
319}
320
321#ifdef TSTIEMAIMPL_WITH_GENERATOR
322
323static uint8_t RandU8Dst(uint32_t iTest)
324{
325 if (iTest < g_cZeroDstTests)
326 return 0;
327 return RandU8();
328}
329
330
331static uint8_t RandU8Src(uint32_t iTest)
332{
333 if (iTest < g_cZeroSrcTests)
334 return 0;
335 return RandU8();
336}
337
338
339static uint16_t RandU16Dst(uint32_t iTest)
340{
341 if (iTest < g_cZeroDstTests)
342 return 0;
343 return RandU16();
344}
345
346
347static uint16_t RandU16Src(uint32_t iTest)
348{
349 if (iTest < g_cZeroSrcTests)
350 return 0;
351 return RandU16();
352}
353
354
355static uint32_t RandU32Dst(uint32_t iTest)
356{
357 if (iTest < g_cZeroDstTests)
358 return 0;
359 return RandU32();
360}
361
362
363static uint32_t RandU32Src(uint32_t iTest)
364{
365 if (iTest < g_cZeroSrcTests)
366 return 0;
367 return RandU32();
368}
369
370
371static uint64_t RandU64Dst(uint32_t iTest)
372{
373 if (iTest < g_cZeroDstTests)
374 return 0;
375 return RandU64();
376}
377
378
379static uint64_t RandU64Src(uint32_t iTest)
380{
381 if (iTest < g_cZeroSrcTests)
382 return 0;
383 return RandU64();
384}
385
386
387/** 2nd operand for an FPU instruction, pairing with RandR80Src1. */
388static int16_t RandI16Src2(uint32_t iTest)
389{
390 if (iTest < 18 * 4)
391 switch (iTest % 4)
392 {
393 case 0: return 0;
394 case 1: return INT16_MAX;
395 case 2: return INT16_MIN;
396 case 3: break;
397 }
398 return (int16_t)RandU16();
399}
400
401
402/** 2nd operand for an FPU instruction, pairing with RandR80Src1. */
403static int32_t RandI32Src2(uint32_t iTest)
404{
405 if (iTest < 18 * 4)
406 switch (iTest % 4)
407 {
408 case 0: return 0;
409 case 1: return INT32_MAX;
410 case 2: return INT32_MIN;
411 case 3: break;
412 }
413 return (int32_t)RandU32();
414}
415
416
417static int64_t RandI64Src(uint32_t iTest)
418{
419 RT_NOREF(iTest);
420 return (int64_t)RandU64();
421}
422
423
424static uint16_t RandFcw(void)
425{
426 return RandU16() & ~X86_FCW_ZERO_MASK;
427}
428
429
430static uint16_t RandFsw(void)
431{
432 AssertCompile((X86_FSW_C_MASK | X86_FSW_XCPT_ES_MASK | X86_FSW_TOP_MASK | X86_FSW_B) == 0xffff);
433 return RandU16();
434}
435
436
437static uint32_t RandMxcsr(void)
438{
439 return RandU32() & ~X86_MXCSR_ZERO_MASK;
440}
441
442
443static void SafeR80FractionShift(PRTFLOAT80U pr80, uint8_t cShift)
444{
445 if (pr80->sj64.uFraction >= RT_BIT_64(cShift))
446 pr80->sj64.uFraction >>= cShift;
447 else
448 pr80->sj64.uFraction = (cShift % 19) + 1;
449}
450
451
452
453static RTFLOAT80U RandR80Ex(uint8_t bType, unsigned cTarget = 80, bool fIntTarget = false)
454{
455 Assert(cTarget == (!fIntTarget ? 80U : 16U) || cTarget == 64U || cTarget == 32U || (cTarget == 59U && fIntTarget));
456
457 RTFLOAT80U r80;
458 r80.au64[0] = RandU64();
459 r80.au16[4] = RandU16();
460
461 /*
462 * Adjust the random stuff according to bType.
463 */
464 bType &= 0x1f;
465 if (bType == 0 || bType == 1 || bType == 2 || bType == 3)
466 {
467 /* Zero (0), Pseudo-Infinity (1), Infinity (2), Indefinite (3). We only keep fSign here. */
468 r80.sj64.uExponent = bType == 0 ? 0 : 0x7fff;
469 r80.sj64.uFraction = bType <= 2 ? 0 : RT_BIT_64(62);
470 r80.sj64.fInteger = bType >= 2 ? 1 : 0;
471 AssertMsg(bType != 0 || RTFLOAT80U_IS_ZERO(&r80), ("%s\n", FormatR80(&r80)));
472 AssertMsg(bType != 1 || RTFLOAT80U_IS_PSEUDO_INF(&r80), ("%s\n", FormatR80(&r80)));
473 Assert( bType != 1 || RTFLOAT80U_IS_387_INVALID(&r80));
474 AssertMsg(bType != 2 || RTFLOAT80U_IS_INF(&r80), ("%s\n", FormatR80(&r80)));
475 AssertMsg(bType != 3 || RTFLOAT80U_IS_INDEFINITE(&r80), ("%s\n", FormatR80(&r80)));
476 }
477 else if (bType == 4 || bType == 5 || bType == 6 || bType == 7)
478 {
479 /* Denormals (4,5) and Pseudo denormals (6,7) */
480 if (bType & 1)
481 SafeR80FractionShift(&r80, r80.sj64.uExponent % 62);
482 else if (r80.sj64.uFraction == 0 && bType < 6)
483 r80.sj64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT80U_FRACTION_BITS) - 1);
484 r80.sj64.uExponent = 0;
485 r80.sj64.fInteger = bType >= 6;
486 AssertMsg(bType >= 6 || RTFLOAT80U_IS_DENORMAL(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
487 AssertMsg(bType < 6 || RTFLOAT80U_IS_PSEUDO_DENORMAL(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
488 }
489 else if (bType == 8 || bType == 9)
490 {
491 /* Pseudo NaN. */
492 if (bType & 1)
493 SafeR80FractionShift(&r80, r80.sj64.uExponent % 62);
494 else if (r80.sj64.uFraction == 0 && !r80.sj64.fInteger)
495 r80.sj64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT80U_FRACTION_BITS) - 1);
496 r80.sj64.uExponent = 0x7fff;
497 if (r80.sj64.fInteger)
498 r80.sj64.uFraction |= RT_BIT_64(62);
499 else
500 r80.sj64.uFraction &= ~RT_BIT_64(62);
501 r80.sj64.fInteger = 0;
502 AssertMsg(RTFLOAT80U_IS_PSEUDO_NAN(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
503 AssertMsg(RTFLOAT80U_IS_NAN(&r80), ("%s bType=%#x\n", FormatR80(&r80), bType));
504 Assert(RTFLOAT80U_IS_387_INVALID(&r80));
505 }
506 else if (bType == 10 || bType == 11 || bType == 12 || bType == 13)
507 {
508 /* Quiet and signalling NaNs. */
509 if (bType & 1)
510 SafeR80FractionShift(&r80, r80.sj64.uExponent % 62);
511 else if (r80.sj64.uFraction == 0)
512 r80.sj64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT80U_FRACTION_BITS) - 1);
513 r80.sj64.uExponent = 0x7fff;
514 if (bType < 12)
515 r80.sj64.uFraction |= RT_BIT_64(62); /* quiet */
516 else
517 r80.sj64.uFraction &= ~RT_BIT_64(62); /* signaling */
518 r80.sj64.fInteger = 1;
519 AssertMsg(bType >= 12 || RTFLOAT80U_IS_QUIET_NAN(&r80), ("%s\n", FormatR80(&r80)));
520 AssertMsg(bType < 12 || RTFLOAT80U_IS_SIGNALLING_NAN(&r80), ("%s\n", FormatR80(&r80)));
521 AssertMsg(RTFLOAT80U_IS_SIGNALLING_NAN(&r80) || RTFLOAT80U_IS_QUIET_NAN(&r80), ("%s\n", FormatR80(&r80)));
522 AssertMsg(RTFLOAT80U_IS_QUIET_OR_SIGNALLING_NAN(&r80), ("%s\n", FormatR80(&r80)));
523 AssertMsg(RTFLOAT80U_IS_NAN(&r80), ("%s\n", FormatR80(&r80)));
524 }
525 else if (bType == 14 || bType == 15)
526 {
527 /* Unnormals */
528 if (bType & 1)
529 SafeR80FractionShift(&r80, RandU8() % 62);
530 r80.sj64.fInteger = 0;
531 if (r80.sj64.uExponent == RTFLOAT80U_EXP_MAX || r80.sj64.uExponent == 0)
532 r80.sj64.uExponent = (uint16_t)RTRandU32Ex(1, RTFLOAT80U_EXP_MAX - 1);
533 AssertMsg(RTFLOAT80U_IS_UNNORMAL(&r80), ("%s\n", FormatR80(&r80)));
534 Assert(RTFLOAT80U_IS_387_INVALID(&r80));
535 }
536 else if (bType < 26)
537 {
538 /* Make sure we have lots of normalized values. */
539 if (!fIntTarget)
540 {
541 const unsigned uMinExp = cTarget == 64 ? RTFLOAT80U_EXP_BIAS - RTFLOAT64U_EXP_BIAS
542 : cTarget == 32 ? RTFLOAT80U_EXP_BIAS - RTFLOAT32U_EXP_BIAS : 0;
543 const unsigned uMaxExp = cTarget == 64 ? uMinExp + RTFLOAT64U_EXP_MAX
544 : cTarget == 32 ? uMinExp + RTFLOAT32U_EXP_MAX : RTFLOAT80U_EXP_MAX;
545 r80.sj64.fInteger = 1;
546 if (r80.sj64.uExponent <= uMinExp)
547 r80.sj64.uExponent = uMinExp + 1;
548 else if (r80.sj64.uExponent >= uMaxExp)
549 r80.sj64.uExponent = uMaxExp - 1;
550
551 if (bType == 16)
 552 { /* All 1s is useful for testing rounding. Also try to trigger special
553 behaviour by sometimes rounding out of range, while we're at it. */
554 r80.sj64.uFraction = RT_BIT_64(63) - 1;
555 uint8_t bExp = RandU8();
556 if ((bExp & 3) == 0)
557 r80.sj64.uExponent = uMaxExp - 1;
558 else if ((bExp & 3) == 1)
559 r80.sj64.uExponent = uMinExp + 1;
560 else if ((bExp & 3) == 2)
561 r80.sj64.uExponent = uMinExp - (bExp & 15); /* (small numbers are mapped to subnormal values) */
562 }
563 }
564 else
565 {
566 /* integer target: */
567 const unsigned uMinExp = RTFLOAT80U_EXP_BIAS;
568 const unsigned uMaxExp = RTFLOAT80U_EXP_BIAS + cTarget - 2;
569 r80.sj64.fInteger = 1;
570 if (r80.sj64.uExponent < uMinExp)
571 r80.sj64.uExponent = uMinExp;
572 else if (r80.sj64.uExponent > uMaxExp)
573 r80.sj64.uExponent = uMaxExp;
574
575 if (bType == 16)
 576 { /* All 1s is useful for testing rounding. Also try to trigger special
577 behaviour by sometimes rounding out of range, while we're at it. */
578 r80.sj64.uFraction = RT_BIT_64(63) - 1;
579 uint8_t bExp = RandU8();
580 if ((bExp & 3) == 0)
581 r80.sj64.uExponent = uMaxExp;
582 else if ((bExp & 3) == 1)
583 r80.sj64.uFraction &= ~(RT_BIT_64(cTarget - 1 - r80.sj64.uExponent) - 1); /* no rounding */
584 }
585 }
586
587 AssertMsg(RTFLOAT80U_IS_NORMAL(&r80), ("%s\n", FormatR80(&r80)));
588 }
589 return r80;
590}
591
592
593static RTFLOAT80U RandR80(unsigned cTarget = 80, bool fIntTarget = false)
594{
595 /*
596 * Make it more likely that we get a good selection of special values.
597 */
598 return RandR80Ex(RandU8(), cTarget, fIntTarget);
599
600}
601
602
603static RTFLOAT80U RandR80Src(uint32_t iTest, unsigned cTarget = 80, bool fIntTarget = false)
604{
605 /* Make sure we cover all the basic types first before going for random selection: */
606 if (iTest <= 18)
607 return RandR80Ex(18 - iTest, cTarget, fIntTarget); /* Starting with 3 normals. */
608 return RandR80(cTarget, fIntTarget);
609}
610
611
612/**
613 * Helper for RandR80Src1 and RandR80Src2 that converts bType from a 0..11 range
614 * to a 0..17, covering all basic value types.
615 */
616static uint8_t RandR80Src12RemapType(uint8_t bType)
617{
618 switch (bType)
619 {
620 case 0: return 18; /* normal */
621 case 1: return 16; /* normal extreme rounding */
622 case 2: return 14; /* unnormal */
623 case 3: return 12; /* Signalling NaN */
624 case 4: return 10; /* Quiet NaN */
625 case 5: return 8; /* PseudoNaN */
626 case 6: return 6; /* Pseudo Denormal */
627 case 7: return 4; /* Denormal */
628 case 8: return 3; /* Indefinite */
629 case 9: return 2; /* Infinity */
630 case 10: return 1; /* Pseudo-Infinity */
631 case 11: return 0; /* Zero */
632 default: AssertFailedReturn(18);
633 }
634}
635
636
637/**
638 * This works in tandem with RandR80Src2 to make sure we cover all operand
639 * type mixes first before we venture into regular random testing.
640 *
 641 * There are 11 basic variations when we leave out the five odd ones using
 642 * SafeR80FractionShift. Because of the special normalized value targeting
643 * rounding, we make it an even 12. So 144 combinations for two operands.
644 */
645static RTFLOAT80U RandR80Src1(uint32_t iTest, unsigned cPartnerBits = 80, bool fPartnerInt = false)
646{
647 if (cPartnerBits == 80)
648 {
649 Assert(!fPartnerInt);
650 if (iTest < 12 * 12)
651 return RandR80Ex(RandR80Src12RemapType(iTest / 12));
652 }
653 else if ((cPartnerBits == 64 || cPartnerBits == 32) && !fPartnerInt)
654 {
655 if (iTest < 12 * 10)
656 return RandR80Ex(RandR80Src12RemapType(iTest / 10));
657 }
658 else if (iTest < 18 * 4 && fPartnerInt)
659 return RandR80Ex(iTest / 4);
660 return RandR80();
661}
662
663
664/** Partner to RandR80Src1. */
665static RTFLOAT80U RandR80Src2(uint32_t iTest)
666{
667 if (iTest < 12 * 12)
668 return RandR80Ex(RandR80Src12RemapType(iTest % 12));
669 return RandR80();
670}
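/*
 * Usage sketch (not from the original sources; the loop body is
 * hypothetical): how a two-operand FPU test generator can pair RandR80Src1
 * and RandR80Src2 so that the first 12 * 12 = 144 iterations walk every
 * combination of the twelve basic value types before switching to purely
 * random inputs.
 */
#if 0
for (uint32_t iTest = 0; iTest < cTests; iTest++)
{
    RTFLOAT80U const r80Val1 = RandR80Src1(iTest); /* value type selected by iTest / 12 while iTest < 144 */
    RTFLOAT80U const r80Val2 = RandR80Src2(iTest); /* value type selected by iTest % 12 while iTest < 144 */
    /* ... invoke the instruction worker on r80Val1/r80Val2 and record the in/out state ... */
}
#endif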
671
672
673static void SafeR64FractionShift(PRTFLOAT64U pr64, uint8_t cShift)
674{
675 if (pr64->s64.uFraction >= RT_BIT_64(cShift))
676 pr64->s64.uFraction >>= cShift;
677 else
678 pr64->s64.uFraction = (cShift % 19) + 1;
679}
680
681
682static RTFLOAT64U RandR64Ex(uint8_t bType)
683{
684 RTFLOAT64U r64;
685 r64.u = RandU64();
686
687 /*
688 * Make it more likely that we get a good selection of special values.
689 * On average 6 out of 16 calls should return a special value.
690 */
691 bType &= 0xf;
692 if (bType == 0 || bType == 1)
693 {
694 /* 0 or Infinity. We only keep fSign here. */
695 r64.s.uExponent = bType == 0 ? 0 : 0x7ff;
696 r64.s.uFractionHigh = 0;
697 r64.s.uFractionLow = 0;
698 AssertMsg(bType != 0 || RTFLOAT64U_IS_ZERO(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
699 AssertMsg(bType != 1 || RTFLOAT64U_IS_INF(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
700 }
701 else if (bType == 2 || bType == 3)
702 {
703 /* Subnormals */
704 if (bType == 3)
705 SafeR64FractionShift(&r64, r64.s64.uExponent % 51);
706 else if (r64.s64.uFraction == 0)
707 r64.s64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT64U_FRACTION_BITS) - 1);
708 r64.s64.uExponent = 0;
709 AssertMsg(RTFLOAT64U_IS_SUBNORMAL(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
710 }
711 else if (bType == 4 || bType == 5 || bType == 6 || bType == 7)
712 {
713 /* NaNs */
714 if (bType & 1)
715 SafeR64FractionShift(&r64, r64.s64.uExponent % 51);
716 else if (r64.s64.uFraction == 0)
717 r64.s64.uFraction = RTRandU64Ex(1, RT_BIT_64(RTFLOAT64U_FRACTION_BITS) - 1);
718 r64.s64.uExponent = 0x7ff;
719 if (bType < 6)
720 r64.s64.uFraction |= RT_BIT_64(RTFLOAT64U_FRACTION_BITS - 1); /* quiet */
721 else
722 r64.s64.uFraction &= ~RT_BIT_64(RTFLOAT64U_FRACTION_BITS - 1); /* signalling */
723 AssertMsg(bType >= 6 || RTFLOAT64U_IS_QUIET_NAN(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
724 AssertMsg(bType < 6 || RTFLOAT64U_IS_SIGNALLING_NAN(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
725 AssertMsg(RTFLOAT64U_IS_NAN(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
726 }
727 else if (bType < 12)
728 {
729 /* Make sure we have lots of normalized values. */
730 if (r64.s.uExponent == 0)
731 r64.s.uExponent = 1;
732 else if (r64.s.uExponent == 0x7ff)
733 r64.s.uExponent = 0x7fe;
734 AssertMsg(RTFLOAT64U_IS_NORMAL(&r64), ("%s bType=%#x\n", FormatR64(&r64), bType));
735 }
736 return r64;
737}
738
739
740static RTFLOAT64U RandR64Src(uint32_t iTest)
741{
742 if (iTest < 16)
743 return RandR64Ex(iTest);
744 return RandR64Ex(RandU8());
745}
746
747
748/** Pairing with an 80-bit floating point arg. */
749static RTFLOAT64U RandR64Src2(uint32_t iTest)
750{
751 if (iTest < 12 * 10)
752 return RandR64Ex(9 - iTest % 10); /* start with normal values */
753 return RandR64Ex(RandU8());
754}
755
756
757static void SafeR32FractionShift(PRTFLOAT32U pr32, uint8_t cShift)
758{
759 if (pr32->s.uFraction >= RT_BIT_32(cShift))
760 pr32->s.uFraction >>= cShift;
761 else
762 pr32->s.uFraction = (cShift % 19) + 1;
763}
764
765
766static RTFLOAT32U RandR32Ex(uint8_t bType)
767{
768 RTFLOAT32U r32;
769 r32.u = RandU32();
770
771 /*
772 * Make it more likely that we get a good selection of special values.
773 * On average 6 out of 16 calls should return a special value.
774 */
775 bType &= 0xf;
776 if (bType == 0 || bType == 1)
777 {
778 /* 0 or Infinity. We only keep fSign here. */
779 r32.s.uExponent = bType == 0 ? 0 : 0xff;
780 r32.s.uFraction = 0;
781 AssertMsg(bType != 0 || RTFLOAT32U_IS_ZERO(&r32), ("%s\n", FormatR32(&r32)));
782 AssertMsg(bType != 1 || RTFLOAT32U_IS_INF(&r32), ("%s\n", FormatR32(&r32)));
783 }
784 else if (bType == 2 || bType == 3)
785 {
786 /* Subnormals */
787 if (bType == 3)
788 SafeR32FractionShift(&r32, r32.s.uExponent % 22);
789 else if (r32.s.uFraction == 0)
790 r32.s.uFraction = RTRandU32Ex(1, RT_BIT_32(RTFLOAT32U_FRACTION_BITS) - 1);
791 r32.s.uExponent = 0;
792 AssertMsg(RTFLOAT32U_IS_SUBNORMAL(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
793 }
794 else if (bType == 4 || bType == 5 || bType == 6 || bType == 7)
795 {
796 /* NaNs */
797 if (bType & 1)
798 SafeR32FractionShift(&r32, r32.s.uExponent % 22);
799 else if (r32.s.uFraction == 0)
800 r32.s.uFraction = RTRandU32Ex(1, RT_BIT_32(RTFLOAT32U_FRACTION_BITS) - 1);
801 r32.s.uExponent = 0xff;
802 if (bType < 6)
803 r32.s.uFraction |= RT_BIT_32(RTFLOAT32U_FRACTION_BITS - 1); /* quiet */
804 else
805 r32.s.uFraction &= ~RT_BIT_32(RTFLOAT32U_FRACTION_BITS - 1); /* signalling */
806 AssertMsg(bType >= 6 || RTFLOAT32U_IS_QUIET_NAN(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
807 AssertMsg(bType < 6 || RTFLOAT32U_IS_SIGNALLING_NAN(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
808 AssertMsg(RTFLOAT32U_IS_NAN(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
809 }
810 else if (bType < 12)
811 {
812 /* Make sure we have lots of normalized values. */
813 if (r32.s.uExponent == 0)
814 r32.s.uExponent = 1;
815 else if (r32.s.uExponent == 0xff)
816 r32.s.uExponent = 0xfe;
817 AssertMsg(RTFLOAT32U_IS_NORMAL(&r32), ("%s bType=%#x\n", FormatR32(&r32), bType));
818 }
819 return r32;
820}
821
822
823static RTFLOAT32U RandR32Src(uint32_t iTest)
824{
825 if (iTest < 16)
826 return RandR32Ex(iTest);
827 return RandR32Ex(RandU8());
828}
829
830
831/** Pairing with an 80-bit floating point arg. */
832static RTFLOAT32U RandR32Src2(uint32_t iTest)
833{
834 if (iTest < 12 * 10)
835 return RandR32Ex(9 - iTest % 10); /* start with normal values */
836 return RandR32Ex(RandU8());
837}
838
839
840static RTPBCD80U RandD80Src(uint32_t iTest)
841{
842 if (iTest < 3)
843 {
844 RTPBCD80U d80Zero = RTPBCD80U_INIT_ZERO(!(iTest & 1));
845 return d80Zero;
846 }
847 if (iTest < 5)
848 {
849 RTPBCD80U d80Ind = RTPBCD80U_INIT_INDEFINITE();
850 return d80Ind;
851 }
852
853 RTPBCD80U d80;
854 uint8_t b = RandU8();
855 d80.s.fSign = b & 1;
856
857 if ((iTest & 7) >= 6)
858 {
859 /* Illegal */
860 d80.s.uPad = (iTest & 7) == 7 ? b >> 1 : 0;
861 for (size_t iPair = 0; iPair < RT_ELEMENTS(d80.s.abPairs); iPair++)
862 d80.s.abPairs[iPair] = RandU8();
863 }
864 else
865 {
866 /* Normal */
867 d80.s.uPad = 0;
868 for (size_t iPair = 0; iPair < RT_ELEMENTS(d80.s.abPairs); iPair++)
869 {
870 uint8_t const uLo = (uint8_t)RTRandU32Ex(0, 9);
871 uint8_t const uHi = (uint8_t)RTRandU32Ex(0, 9);
872 d80.s.abPairs[iPair] = RTPBCD80U_MAKE_PAIR(uHi, uLo);
873 }
874 }
875 return d80;
876}
877
878# if 0 /* unused */
879
880static const char *GenFormatR80(PCRTFLOAT80U plrd)
881{
882 if (RTFLOAT80U_IS_ZERO(plrd))
883 return plrd->s.fSign ? "RTFLOAT80U_INIT_ZERO(1)" : "RTFLOAT80U_INIT_ZERO(0)";
884 if (RTFLOAT80U_IS_INF(plrd))
885 return plrd->s.fSign ? "RTFLOAT80U_INIT_INF(1)" : "RTFLOAT80U_INIT_INF(0)";
886 if (RTFLOAT80U_IS_INDEFINITE(plrd))
887 return plrd->s.fSign ? "RTFLOAT80U_INIT_IND(1)" : "RTFLOAT80U_INIT_IND(0)";
888 if (RTFLOAT80U_IS_QUIET_NAN(plrd) && (plrd->s.uMantissa & (RT_BIT_64(62) - 1)) == 1)
889 return plrd->s.fSign ? "RTFLOAT80U_INIT_QNAN(1)" : "RTFLOAT80U_INIT_QNAN(0)";
890 if (RTFLOAT80U_IS_SIGNALLING_NAN(plrd) && (plrd->s.uMantissa & (RT_BIT_64(62) - 1)) == 1)
891 return plrd->s.fSign ? "RTFLOAT80U_INIT_SNAN(1)" : "RTFLOAT80U_INIT_SNAN(0)";
892
893 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
894 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTFLOAT80U_INIT_C(%d,%#RX64,%u)",
895 plrd->s.fSign, plrd->s.uMantissa, plrd->s.uExponent);
896 return pszBuf;
897}
898
899static const char *GenFormatR64(PCRTFLOAT64U prd)
900{
901 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
902 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTFLOAT64U_INIT_C(%d,%#RX64,%u)",
903 prd->s.fSign, RT_MAKE_U64(prd->s.uFractionLow, prd->s.uFractionHigh), prd->s.uExponent);
904 return pszBuf;
905}
906
907
908static const char *GenFormatR32(PCRTFLOAT32U pr)
909{
910 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
911 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTFLOAT32U_INIT_C(%d,%#RX32,%u)", pr->s.fSign, pr->s.uFraction, pr->s.uExponent);
912 return pszBuf;
913}
914
915
916static const char *GenFormatD80(PCRTPBCD80U pd80)
917{
918 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
919 size_t off;
920 if (pd80->s.uPad == 0)
921 off = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTPBCD80U_INIT_C(%d", pd80->s.fSign);
922 else
923 off = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "RTPBCD80U_INIT_EX_C(%#x,%d", pd80->s.uPad, pd80->s.fSign);
924 size_t iPair = RT_ELEMENTS(pd80->s.abPairs);
925 while (iPair-- > 0)
926 off += RTStrPrintf(&pszBuf[off], sizeof(g_aszBuf[0]) - off, ",%d,%d",
927 RTPBCD80U_HI_DIGIT(pd80->s.abPairs[iPair]),
928 RTPBCD80U_LO_DIGIT(pd80->s.abPairs[iPair]));
929 pszBuf[off++] = ')';
930 pszBuf[off++] = '\0';
931 return pszBuf;
932}
933
934
935static const char *GenFormatI64(int64_t i64)
936{
937 if (i64 == INT64_MIN) /* This one is problematic */
938 return "INT64_MIN";
939 if (i64 == INT64_MAX)
940 return "INT64_MAX";
941 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
942 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "INT64_C(%RI64)", i64);
943 return pszBuf;
944}
945
946# if 0 /* unused */
947static const char *GenFormatI64(int64_t const *pi64)
948{
949 return GenFormatI64(*pi64);
950}
951# endif
952
953static const char *GenFormatI32(int32_t i32)
954{
955 if (i32 == INT32_MIN) /* This one is problematic */
956 return "INT32_MIN";
957 if (i32 == INT32_MAX)
958 return "INT32_MAX";
959 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
960 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "INT32_C(%RI32)", i32);
961 return pszBuf;
962}
963
964
965const char *GenFormatI32(int32_t const *pi32)
966{
967 return GenFormatI32(*pi32);
968}
969
970
971const char *GenFormatI16(int16_t i16)
972{
973 if (i16 == INT16_MIN) /* This one is problematic */
974 return "INT16_MIN";
975 if (i16 == INT16_MAX)
976 return "INT16_MAX";
977 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
978 RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), "INT16_C(%RI16)", i16);
979 return pszBuf;
980}
981
982
983const char *GenFormatI16(int16_t const *pi16)
984{
985 return GenFormatI16(*pi16);
986}
987
988
989static void GenerateHeader(PRTSTREAM pOut, const char *pszCpuDesc, const char *pszCpuType)
990{
991 /* We want to tag the generated source code with the revision that produced it. */
992 static char s_szRev[] = "$Revision: 103100 $";
993 const char *pszRev = RTStrStripL(strchr(s_szRev, ':') + 1);
994 size_t cchRev = 0;
995 while (RT_C_IS_DIGIT(pszRev[cchRev]))
996 cchRev++;
997
998 RTStrmPrintf(pOut,
999 "/* $Id: tstIEMAImpl.cpp 103100 2024-01-26 23:36:34Z vboxsync $ */\n"
1000 "/** @file\n"
1001 " * IEM Assembly Instruction Helper Testcase Data%s%s - r%.*s on %s.\n"
1002 " */\n"
1003 "\n"
1004 "/*\n"
1005 " * Copyright (C) 2022-" VBOX_C_YEAR " Oracle and/or its affiliates.\n"
1006 " *\n"
1007 " * This file is part of VirtualBox base platform packages, as\n"
1008 " * available from https://www.virtualbox.org.\n"
1009 " *\n"
1010 " * This program is free software; you can redistribute it and/or\n"
1011 " * modify it under the terms of the GNU General Public License\n"
1012 " * as published by the Free Software Foundation, in version 3 of the\n"
1013 " * License.\n"
1014 " *\n"
1015 " * This program is distributed in the hope that it will be useful, but\n"
1016 " * WITHOUT ANY WARRANTY; without even the implied warranty of\n"
1017 " * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
1018 " * General Public License for more details.\n"
1019 " *\n"
1020 " * You should have received a copy of the GNU General Public License\n"
1021 " * along with this program; if not, see <https://www.gnu.org/licenses>.\n"
1022 " *\n"
1023 " * SPDX-License-Identifier: GPL-3.0-only\n"
1024 " */\n"
1025 "\n"
1026 "#include \"tstIEMAImpl.h\"\n"
1027 "\n"
1028 ,
1029 pszCpuType ? " " : "", pszCpuType ? pszCpuType : "", cchRev, pszRev, pszCpuDesc);
1030}
1031
1032
1033static PRTSTREAM GenerateOpenWithHdr(const char *pszFilename, const char *pszCpuDesc, const char *pszCpuType)
1034{
1035 PRTSTREAM pOut = NULL;
1036 int rc = RTStrmOpen(pszFilename, "w", &pOut);
1037 if (RT_SUCCESS(rc))
1038 {
1039 GenerateHeader(pOut, pszCpuDesc, pszCpuType);
1040 return pOut;
1041 }
1042 RTMsgError("Failed to open %s for writing: %Rrc", pszFilename, rc);
1043 return NULL;
1044}
1045
1046
1047static RTEXITCODE GenerateFooterAndClose(PRTSTREAM pOut, const char *pszFilename, RTEXITCODE rcExit)
1048{
1049 RTStrmPrintf(pOut,
1050 "\n"
1051 "/* end of file */\n");
1052 int rc = RTStrmClose(pOut);
1053 if (RT_SUCCESS(rc))
1054 return rcExit;
1055 return RTMsgErrorExitFailure("RTStrmClose failed on %s: %Rrc", pszFilename, rc);
1056}
1057
1058
1059static void GenerateArrayStart(PRTSTREAM pOut, const char *pszName, const char *pszType)
1060{
1061 RTStrmPrintf(pOut, "%s const g_aTests_%s[] =\n{\n", pszType, pszName);
1062}
1063
1064
1065static void GenerateArrayEnd(PRTSTREAM pOut, const char *pszName)
1066{
1067 RTStrmPrintf(pOut,
1068 "};\n"
1069 "uint32_t const g_cTests_%s = RT_ELEMENTS(g_aTests_%s);\n"
1070 "\n",
1071 pszName, pszName);
1072}
1073
1074# endif /* unused */
1075
1076static void GenerateBinaryWrite(PIEMBINARYOUTPUT pBinOut, const void *pvData, size_t cbData)
1077{
1078 pBinOut->cbWritten += cbData; /* ignore errors - makes entry calculation simpler */
1079 if (RT_SUCCESS_NP(pBinOut->rcWrite))
1080 {
1081 pBinOut->rcWrite = RTVfsIoStrmWrite(pBinOut->hVfsUncompressed, pvData, cbData, true /*fBlocking*/, NULL);
1082 if (RT_SUCCESS(pBinOut->rcWrite))
1083 return;
1084 RTMsgError("Error writing '%s': %Rrc", pBinOut->szFilename, pBinOut->rcWrite);
1085 }
1086}
1087
1088static bool GenerateBinaryOpen(PIEMBINARYOUTPUT pBinOut, const char *pszFilenameFmt, const char *pszName,
1089 IEMTESTENTRYINFO const *pInfoToPreserve, uint32_t cbEntry)
1090{
1091 pBinOut->cbEntry = cbEntry;
1092 pBinOut->cbWritten = 0;
1093 pBinOut->hVfsFile = NIL_RTVFSFILE;
1094 pBinOut->hVfsUncompressed = NIL_RTVFSIOSTREAM;
1095 if (pszFilenameFmt)
1096 {
1097 pBinOut->fNull = false;
1098 if (RTStrPrintf2(pBinOut->szFilename, sizeof(pBinOut->szFilename), pszFilenameFmt, pszName) > 0)
1099 {
1100 RTMsgInfo("GenerateBinaryOpen: %s...\n", pBinOut->szFilename);
1101 pBinOut->rcWrite = RTVfsFileOpenNormal(pBinOut->szFilename,
1102 RTFILE_O_CREATE_REPLACE | RTFILE_O_WRITE | RTFILE_O_DENY_READWRITE,
1103 &pBinOut->hVfsFile);
1104 if (RT_SUCCESS(pBinOut->rcWrite))
1105 {
1106 RTVFSIOSTREAM hVfsIoFile = RTVfsFileToIoStream(pBinOut->hVfsFile);
1107 if (hVfsIoFile != NIL_RTVFSIOSTREAM)
1108 {
1109 pBinOut->rcWrite = RTZipGzipCompressIoStream(hVfsIoFile, 0 /*fFlags*/, 9, &pBinOut->hVfsUncompressed);
1110 RTVfsIoStrmRelease(hVfsIoFile);
1111 if (RT_SUCCESS(pBinOut->rcWrite))
1112 {
1113 pBinOut->rcWrite = VINF_SUCCESS;
1114 pBinOut->fWroteHeader = false;
1115
1116 /* Write the header if applicable. */
1117 if ( !pInfoToPreserve
1118 || (pInfoToPreserve->uSvnRev != 0 && *pInfoToPreserve->pszCpuDesc))
1119 {
1120 IEMBINARYHEADER Hdr;
1121 RT_ZERO(Hdr);
1122 memcpy(Hdr.szMagic, IEMBINARYHEADER_MAGIC, sizeof(IEMBINARYHEADER_MAGIC));
1123 Hdr.cbEntry = cbEntry;
1124 Hdr.uSvnRev = pInfoToPreserve ? pInfoToPreserve->uSvnRev : g_uSvnRev;
1125 RTStrCopy(Hdr.szCpuDesc, sizeof(Hdr.szCpuDesc),
1126 pInfoToPreserve ? pInfoToPreserve->pszCpuDesc : g_szCpuDesc);
1127 GenerateBinaryWrite(pBinOut, &Hdr, sizeof(Hdr));
1128 pBinOut->fWroteHeader = true;
1129 }
1130
1131 return true;
1132 }
1133
1134 RTMsgError("RTZipGzipCompressIoStream: %Rrc", pBinOut->rcWrite);
1135 }
1136 else
1137 {
1138 RTMsgError("RTVfsFileToIoStream failed!");
1139 pBinOut->rcWrite = VERR_VFS_CHAIN_CAST_FAILED;
1140 }
1141 RTVfsFileRelease(pBinOut->hVfsFile);
1142 RTFileDelete(pBinOut->szFilename);
1143 }
1144 else
1145 RTMsgError("Failed to open '%s' for writing: %Rrc", pBinOut->szFilename, pBinOut->rcWrite);
1146 }
1147 else
1148 {
1149 RTMsgError("filename too long: %s + %s", pszFilenameFmt, pszName);
1150 pBinOut->rcWrite = VERR_BUFFER_OVERFLOW;
1151 }
1152 return false;
1153 }
1154 RTMsgInfo("GenerateBinaryOpen: %s -> /dev/null\n", pszName);
1155 pBinOut->rcWrite = VERR_IGNORED;
1156 pBinOut->fNull = true;
1157 pBinOut->fWroteHeader = false;
1158 pBinOut->szFilename[0] = '\0';
1159 return true;
1160}
1161
1162# define GENERATE_BINARY_OPEN(a_pBinOut, a_papszNameFmts, a_Entry) \
1163 GenerateBinaryOpen((a_pBinOut), a_papszNameFmts[(a_Entry).idxCpuEflFlavour], (a_Entry).pszName, \
1164 NULL /*pInfo*/, sizeof((a_Entry).paTests[0]))
1165
1166static bool GenerateBinaryClose(PIEMBINARYOUTPUT pBinOut)
1167{
1168 if (!pBinOut->fNull)
1169 {
1170 /* Write footer if we've written a header. */
1171 if (pBinOut->fWroteHeader)
1172 {
1173 IEMBINARYFOOTER Ftr;
1174 RT_ZERO(Ftr);
1175 memcpy(Ftr.szMagic, IEMBINARYFOOTER_MAGIC, sizeof(IEMBINARYFOOTER_MAGIC));
1176 Ftr.cbEntry = pBinOut->cbEntry;
1177 Ftr.cEntries = (uint32_t)((pBinOut->cbWritten - sizeof(IEMBINARYHEADER)) / pBinOut->cbEntry);
1178 Assert(Ftr.cEntries * pBinOut->cbEntry + sizeof(IEMBINARYHEADER) == pBinOut->cbWritten);
1179 GenerateBinaryWrite(pBinOut, &Ftr, sizeof(Ftr));
1180 }
1181
1182 /* This is rather jovial about rcWrite. */
1183 int const rc1 = RTVfsIoStrmFlush(pBinOut->hVfsUncompressed);
1184 RTVfsIoStrmRelease(pBinOut->hVfsUncompressed);
1185 pBinOut->hVfsUncompressed = NIL_RTVFSIOSTREAM;
1186 if (RT_FAILURE(rc1))
1187 RTMsgError("Error flushing '%s' (uncompressed stream): %Rrc", pBinOut->szFilename, rc1);
1188
1189 int const rc2 = RTVfsFileFlush(pBinOut->hVfsFile);
1190 RTVfsFileRelease(pBinOut->hVfsFile);
1191 pBinOut->hVfsFile = NIL_RTVFSFILE;
1192 if (RT_FAILURE(rc2))
1193 RTMsgError("Error flushing '%s' (compressed file): %Rrc", pBinOut->szFilename, rc2);
1194
1195 return RT_SUCCESS(rc2) && RT_SUCCESS(rc1) && RT_SUCCESS(pBinOut->rcWrite);
1196 }
1197 return true;
1198}
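/*
 * Lifecycle sketch (not from the original sources; the filename format and
 * test values are assumed): the open -> write -> close pattern that the
 * DUMP_ALL_FN and generator macros below build on top of the three helpers
 * above.  GenerateBinaryOpen writes the header, each GenerateBinaryWrite
 * appends one fixed-size record to the gzip stream, and GenerateBinaryClose
 * appends the footer and flushes both streams.
 */
#if 0
IEMBINARYOUTPUT BinOut;
if (GenerateBinaryOpen(&BinOut, "tstIEMAImpl-%s.bin.gz" /* assumed format */, "add_u8",
                       NULL /*pInfoToPreserve*/, sizeof(BINU8_TEST_T)))
{
    BINU8_TEST_T Test = { 0 }; /* fill in the inputs, run the worker, record the outputs */
    GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
    GenerateBinaryClose(&BinOut); /* returns false if any write or flush failed */
}
#endif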
1199
1200/* Helper for DumpAll. */
1201# define DUMP_ALL_FN(a_FnBaseName, a_aSubTests) \
1202 static RTEXITCODE a_FnBaseName ## DumpAll(const char * const * papszNameFmts) \
1203 { \
1204 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
1205 { \
1206 AssertReturn(DECOMPRESS_TESTS(a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
1207 IEMBINARYOUTPUT BinOut; \
1208 AssertReturn(GenerateBinaryOpen(&BinOut, papszNameFmts[a_aSubTests[iFn].idxCpuEflFlavour], \
1209 a_aSubTests[iFn].pszName, &a_aSubTests[iFn].Info, \
1210 sizeof(a_aSubTests[iFn].paTests[0])), \
1211 RTEXITCODE_FAILURE); \
1212 GenerateBinaryWrite(&BinOut, a_aSubTests[iFn].paTests, a_aSubTests[iFn].cTests); \
1213 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
1214 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
1215 } \
1216 return RTEXITCODE_SUCCESS; \
1217 }
1218#endif /* TSTIEMAIMPL_WITH_GENERATOR */
1219
1220
1221/*
1222 * Test helpers.
1223 */
1224static bool IsTestEnabled(const char *pszName)
1225{
1226 /* Process excludes first: */
1227 uint32_t i = g_cExcludeTestPatterns;
1228 while (i-- > 0)
1229 if (RTStrSimplePatternMultiMatch(g_apszExcludeTestPatterns[i], RTSTR_MAX, pszName, RTSTR_MAX, NULL))
1230 return false;
1231
1232 /* If no include patterns, everything is included: */
1233 i = g_cIncludeTestPatterns;
1234 if (!i)
1235 return true;
1236
 1237 /* Otherwise only tests matching the include patterns get tested: */
1238 while (i-- > 0)
1239 if (RTStrSimplePatternMultiMatch(g_apszIncludeTestPatterns[i], RTSTR_MAX, pszName, RTSTR_MAX, NULL))
1240 return true;
1241
1242 return false;
1243}
1244
1245
1246static bool SubTestAndCheckIfEnabled(const char *pszName)
1247{
1248 RTTestSub(g_hTest, pszName);
1249 if (IsTestEnabled(pszName))
1250 return true;
1251 RTTestSkipped(g_hTest, g_cVerbosity > 0 ? "excluded" : NULL);
1252 return false;
1253}
1254
1255
1256/** Decompresses test data before use as required. */
1257static int DecompressBinaryTest(void const *pvCompressed, uint32_t cbCompressed, size_t cbEntry, const char *pszWhat,
1258 void **ppvTests, uint32_t *pcTests, IEMTESTENTRYINFO *pInfo)
1259{
1260 /* Don't do it again. */
1261 if (pInfo->pvUncompressed && *ppvTests)
1262 return VINF_SUCCESS;
1263
1264 /* Open a memory stream for the compressed binary data. */
1265 RTVFSIOSTREAM hVfsIos = NIL_RTVFSIOSTREAM;
1266 int rc = RTVfsIoStrmFromBuffer(RTFILE_O_READ, pvCompressed, cbCompressed, &hVfsIos);
1267 RTTESTI_CHECK_RC_OK_RET(rc, rc);
1268
1269 /* Open a decompressed stream for it. */
1270 RTVFSIOSTREAM hVfsIosDecomp = NIL_RTVFSIOSTREAM;
1271 rc = RTZipGzipDecompressIoStream(hVfsIos, RTZIPGZIPDECOMP_F_ALLOW_ZLIB_HDR, &hVfsIosDecomp);
1272 RTTESTI_CHECK_RC_OK(rc);
1273 if (RT_SUCCESS(rc))
1274 {
1275 /* Initial output buffer allocation. */
1276 size_t cbDecompressedAlloc = cbCompressed <= _16M ? (size_t)cbCompressed * 16 : (size_t)cbCompressed * 4;
1277 uint8_t *pbDecompressed = (uint8_t *)RTMemAllocZ(cbDecompressedAlloc);
1278 if (pbDecompressed)
1279 {
1280 size_t off = 0;
1281 for (;;)
1282 {
1283 size_t cbRead = 0;
1284 rc = RTVfsIoStrmRead(hVfsIosDecomp, &pbDecompressed[off], cbDecompressedAlloc - off, true /*fBlocking*/, &cbRead);
1285 if (RT_FAILURE(rc))
1286 break;
1287 if (rc == VINF_EOF && cbRead == 0)
1288 break;
1289 off += cbRead;
1290
1291 if (cbDecompressedAlloc < off + 256)
1292 {
1293 size_t const cbNew = cbDecompressedAlloc < _128M ? cbDecompressedAlloc * 2 : cbDecompressedAlloc + _32M;
1294 void * const pvNew = RTMemRealloc(pbDecompressed, cbNew);
1295 AssertBreakStmt(pvNew, rc = VERR_NO_MEMORY);
1296 cbDecompressedAlloc = cbNew;
1297 pbDecompressed = (uint8_t *)pvNew;
1298 }
1299 }
1300 if (RT_SUCCESS(rc))
1301 {
1302 size_t const cbUncompressed = off;
1303
1304 /* Validate the header and footer if present and subtract them from 'off'. */
1305 IEMBINARYHEADER const *pHdr = NULL;
1306 if ( off >= sizeof(IEMTESTENTRYINFO)
1307 && memcmp(pbDecompressed, IEMBINARYHEADER_MAGIC, sizeof(IEMBINARYHEADER_MAGIC)) == 0)
1308 {
1309 pHdr = (IEMBINARYHEADER const *)pbDecompressed;
1310 IEMBINARYFOOTER const *pFtr = (IEMBINARYFOOTER const *)&pbDecompressed[off - sizeof(IEMBINARYFOOTER)];
1311
1312 off -= sizeof(*pHdr) + sizeof(*pFtr);
1313 rc = VERR_IO_BAD_UNIT;
1314 if (pHdr->cbEntry != cbEntry)
1315 RTTestIFailed("Test entry size differs for '%s': %#x (header r%u), expected %#zx (uncompressed size %#zx)",
1316 pszWhat, pHdr->cbEntry, pHdr->uSvnRev, cbEntry, off + sizeof(*pHdr) + sizeof(*pFtr));
1317 else if (memcmp(pFtr->szMagic, IEMBINARYFOOTER_MAGIC, sizeof(IEMBINARYFOOTER_MAGIC)) != 0)
1318 RTTestIFailed("Wrong footer magic for '%s': %.*Rhxs\n", pszWhat, sizeof(pFtr->szMagic), pFtr->szMagic);
1319 else if (pFtr->cbEntry != cbEntry)
1320 RTTestIFailed("Wrong footer entry size for '%s': %#x, expected %#x\n", pszWhat, pFtr->cbEntry, cbEntry);
1321 else if (pFtr->cEntries != off / cbEntry)
1322 RTTestIFailed("Wrong footer entry count for '%s': %#x, expected %#x\n",
1323 pszWhat, pFtr->cEntries, off / cbEntry);
1324 else
1325 rc = VINF_SUCCESS;
1326 }
1327
1328 /* Validate the decompressed size wrt entry size. */
1329 if ((off % cbEntry) != 0 && RT_SUCCESS(rc))
1330 {
1331 RTTestIFailed("Uneven decompressed data size for '%s': %#zx vs entry size %#zx -> %#zx",
1332 pszWhat, off, cbEntry, off % cbEntry);
1333 rc = VERR_IO_BAD_LENGTH;
1334 }
1335
1336 if (RT_SUCCESS(rc))
1337 {
1338 /*
1339 * We're good.
1340 */
 1341 /* Reallocate the block if it's way too big. */
1342 if (cbDecompressedAlloc - cbUncompressed > _512K)
1343 {
1344 void * const pvNew = RTMemRealloc(pbDecompressed, cbUncompressed);
1345 if (pvNew)
1346 {
1347 pbDecompressed = (uint8_t *)pvNew;
1348 if (pHdr)
1349 pHdr = (IEMBINARYHEADER const *)pbDecompressed;
1350 }
1351 }
1352 RTMEM_MAY_LEAK(pbDecompressed);
1353
1354 /* Fill in the info and other return values. */
1355 pInfo->cbUncompressed = (uint32_t)cbUncompressed;
1356 pInfo->pvUncompressed = pbDecompressed;
1357 pInfo->pszCpuDesc = pHdr ? pHdr->szCpuDesc : NULL;
1358 pInfo->uSvnRev = pHdr ? pHdr->uSvnRev : 0;
1359 *pcTests = (uint32_t)(off / cbEntry);
1360 *ppvTests = pHdr ? (uint8_t *)(pHdr + 1) : pbDecompressed;
1361
1362 pbDecompressed = NULL;
1363 rc = VINF_SUCCESS;
1364 }
1365 }
1366 else
1367 RTTestIFailed("Failed to decompress binary stream '%s': %Rrc (off=%#zx, cbCompressed=%#x)",
1368 pszWhat, rc, off, cbCompressed);
1369 RTMemFree(pbDecompressed);
1370 }
1371 else
1372 {
1373 RTTestIFailed("Out of memory decompressing test data '%s'", pszWhat);
1374 rc = VERR_NO_MEMORY;
1375 }
1376 RTVfsIoStrmRelease(hVfsIosDecomp);
1377 }
1378 RTVfsIoStrmRelease(hVfsIos);
1379 return rc;
1380}
1381
1382#define DECOMPRESS_TESTS(a_Entry) \
1383 RT_SUCCESS(DecompressBinaryTest((a_Entry).pvCompressedTests, *(a_Entry).pcbCompressedTests, \
1384 sizeof((a_Entry).paTests[0]), (a_Entry).pszName, \
1385 (void **)&(a_Entry).paTests, &(a_Entry).cTests, &(a_Entry).Info))
1386
1387/** Frees the decompressed test data. */
1388static void FreeDecompressedTests(void **ppvTests, uint32_t *pcTests, IEMTESTENTRYINFO *pInfo)
1389{
1390 RTMemFree(pInfo->pvUncompressed);
1391 pInfo->pvUncompressed = NULL;
1392 pInfo->cbUncompressed = 0;
1393 *ppvTests = NULL;
1394 *pcTests = 0;
1395}
1396
1397#define FREE_DECOMPRESSED_TESTS(a_Entry) \
1398 FreeDecompressedTests((void **)&(a_Entry).paTests, &(a_Entry).cTests, &(a_Entry).Info)
1399
1400
1401/** Checks if the test is enabled and decompresses the test data. */
1402static int SubTestAndCheckIfEnabledAndDecompress(const char *pszName, void const *pvCompressed, uint32_t cbCompressed,
1403 size_t cbEntry, void **ppvTests, uint32_t *pcTests, IEMTESTENTRYINFO *pInfo)
1404{
1405 if (SubTestAndCheckIfEnabled(pszName))
1406 {
1407 int const rc = DecompressBinaryTest(pvCompressed, cbCompressed, cbEntry, pszName, ppvTests, pcTests, pInfo);
1408 if (RT_SUCCESS(rc))
1409 return true;
1410 }
1411 return false;
1412}
1413
1414#define SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_Entry) \
1415 SubTestAndCheckIfEnabledAndDecompress((a_Entry).pszName, (a_Entry).pvCompressedTests, *(a_Entry).pcbCompressedTests, \
1416 sizeof((a_Entry).paTests[0]), \
1417 (void **)&(a_Entry).paTests, &(a_Entry).cTests, &(a_Entry).Info)
1418
1419
1420static const char *EFlagsDiff(uint32_t fActual, uint32_t fExpected)
1421{
1422 if (fActual == fExpected)
1423 return "";
1424
1425 uint32_t const fXor = fActual ^ fExpected;
1426 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1427 size_t cch = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), " - %#x", fXor);
1428
1429 static struct
1430 {
1431 const char *pszName;
1432 uint32_t fFlag;
1433 } const s_aFlags[] =
1434 {
1435#define EFL_ENTRY(a_Flags) { #a_Flags, X86_EFL_ ## a_Flags }
1436 EFL_ENTRY(CF),
1437 EFL_ENTRY(PF),
1438 EFL_ENTRY(AF),
1439 EFL_ENTRY(ZF),
1440 EFL_ENTRY(SF),
1441 EFL_ENTRY(TF),
1442 EFL_ENTRY(IF),
1443 EFL_ENTRY(DF),
1444 EFL_ENTRY(OF),
1445 EFL_ENTRY(IOPL),
1446 EFL_ENTRY(NT),
1447 EFL_ENTRY(RF),
1448 EFL_ENTRY(VM),
1449 EFL_ENTRY(AC),
1450 EFL_ENTRY(VIF),
1451 EFL_ENTRY(VIP),
1452 EFL_ENTRY(ID),
1453 };
1454 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1455 if (s_aFlags[i].fFlag & fXor)
1456 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch,
1457 s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
1458 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
1459 return pszBuf;
1460}
1461
1462
1463static const char *FswDiff(uint16_t fActual, uint16_t fExpected)
1464{
1465 if (fActual == fExpected)
1466 return "";
1467
1468 uint16_t const fXor = fActual ^ fExpected;
1469 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1470 size_t cch = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), " - %#x", fXor);
1471
1472 static struct
1473 {
1474 const char *pszName;
1475 uint32_t fFlag;
1476 } const s_aFlags[] =
1477 {
1478#define FSW_ENTRY(a_Flags) { #a_Flags, X86_FSW_ ## a_Flags }
1479 FSW_ENTRY(IE),
1480 FSW_ENTRY(DE),
1481 FSW_ENTRY(ZE),
1482 FSW_ENTRY(OE),
1483 FSW_ENTRY(UE),
1484 FSW_ENTRY(PE),
1485 FSW_ENTRY(SF),
1486 FSW_ENTRY(ES),
1487 FSW_ENTRY(C0),
1488 FSW_ENTRY(C1),
1489 FSW_ENTRY(C2),
1490 FSW_ENTRY(C3),
1491 FSW_ENTRY(B),
1492 };
1493 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1494 if (s_aFlags[i].fFlag & fXor)
1495 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch,
1496 s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
1497 if (fXor & X86_FSW_TOP_MASK)
1498 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "/TOP%u!%u",
1499 X86_FSW_TOP_GET(fActual), X86_FSW_TOP_GET(fExpected));
1500#if 0 /* For debugging fprem & fprem1 */
1501 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, " - Q=%d (vs %d)",
1502 X86_FSW_CX_TO_QUOTIENT(fActual), X86_FSW_CX_TO_QUOTIENT(fExpected));
1503#endif
1504 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
1505 return pszBuf;
1506}
1507
1508
1509static const char *MxcsrDiff(uint32_t fActual, uint32_t fExpected)
1510{
1511 if (fActual == fExpected)
1512 return "";
1513
1514 uint16_t const fXor = fActual ^ fExpected;
1515 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1516 size_t cch = RTStrPrintf(pszBuf, sizeof(g_aszBuf[0]), " - %#x", fXor);
1517
1518 static struct
1519 {
1520 const char *pszName;
1521 uint32_t fFlag;
1522 } const s_aFlags[] =
1523 {
1524#define MXCSR_ENTRY(a_Flags) { #a_Flags, X86_MXCSR_ ## a_Flags }
1525 MXCSR_ENTRY(IE),
1526 MXCSR_ENTRY(DE),
1527 MXCSR_ENTRY(ZE),
1528 MXCSR_ENTRY(OE),
1529 MXCSR_ENTRY(UE),
1530 MXCSR_ENTRY(PE),
1531
1532 MXCSR_ENTRY(IM),
1533 MXCSR_ENTRY(DM),
1534 MXCSR_ENTRY(ZM),
1535 MXCSR_ENTRY(OM),
1536 MXCSR_ENTRY(UM),
1537 MXCSR_ENTRY(PM),
1538
1539 MXCSR_ENTRY(DAZ),
1540 MXCSR_ENTRY(FZ),
1541#undef MXCSR_ENTRY
1542 };
1543 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1544 if (s_aFlags[i].fFlag & fXor)
1545 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch,
1546 s_aFlags[i].fFlag & fActual ? "/%s" : "/!%s", s_aFlags[i].pszName);
1547 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
1548 return pszBuf;
1549}
1550
1551
1552static const char *FormatFcw(uint16_t fFcw)
1553{
1554 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1555
1556 const char *pszPC = NULL; /* (msc+gcc are too stupid) */
1557 switch (fFcw & X86_FCW_PC_MASK)
1558 {
1559 case X86_FCW_PC_24: pszPC = "PC24"; break;
1560 case X86_FCW_PC_RSVD: pszPC = "PCRSVD!"; break;
1561 case X86_FCW_PC_53: pszPC = "PC53"; break;
1562 case X86_FCW_PC_64: pszPC = "PC64"; break;
1563 }
1564
1565 const char *pszRC = NULL; /* (msc+gcc are too stupid) */
1566 switch (fFcw & X86_FCW_RC_MASK)
1567 {
1568 case X86_FCW_RC_NEAREST: pszRC = "NEAR"; break;
1569 case X86_FCW_RC_DOWN: pszRC = "DOWN"; break;
1570 case X86_FCW_RC_UP: pszRC = "UP"; break;
1571 case X86_FCW_RC_ZERO: pszRC = "ZERO"; break;
1572 }
1573 size_t cch = RTStrPrintf(&pszBuf[0], sizeof(g_aszBuf[0]), "%s %s", pszPC, pszRC);
1574
1575 static struct
1576 {
1577 const char *pszName;
1578 uint32_t fFlag;
1579 } const s_aFlags[] =
1580 {
1581#define FCW_ENTRY(a_Flags) { #a_Flags, X86_FCW_ ## a_Flags }
1582 FCW_ENTRY(IM),
1583 FCW_ENTRY(DM),
1584 FCW_ENTRY(ZM),
1585 FCW_ENTRY(OM),
1586 FCW_ENTRY(UM),
1587 FCW_ENTRY(PM),
1588 { "6M", 64 },
1589 };
1590 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1591 if (fFcw & s_aFlags[i].fFlag)
1592 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, " %s", s_aFlags[i].pszName);
1593
1594 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
1595 return pszBuf;
1596}
1597
1598
1599static const char *FormatMxcsr(uint32_t fMxcsr)
1600{
1601 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1602
1603 const char *pszRC = NULL; /* (msc+gcc are too stupid) */
1604 switch (fMxcsr & X86_MXCSR_RC_MASK)
1605 {
1606 case X86_MXCSR_RC_NEAREST: pszRC = "NEAR"; break;
1607 case X86_MXCSR_RC_DOWN: pszRC = "DOWN"; break;
1608 case X86_MXCSR_RC_UP: pszRC = "UP"; break;
1609 case X86_MXCSR_RC_ZERO: pszRC = "ZERO"; break;
1610 }
1611
1612 const char *pszDAZ = fMxcsr & X86_MXCSR_DAZ ? " DAZ" : "";
1613 const char *pszFZ = fMxcsr & X86_MXCSR_FZ ? " FZ" : "";
1614 size_t cch = RTStrPrintf(&pszBuf[0], sizeof(g_aszBuf[0]), "%s%s%s", pszRC, pszDAZ, pszFZ);
1615
1616 static struct
1617 {
1618 const char *pszName;
1619 uint32_t fFlag;
1620 } const s_aFlags[] =
1621 {
1622#define MXCSR_ENTRY(a_Flags) { #a_Flags, X86_MXCSR_ ## a_Flags }
1623 MXCSR_ENTRY(IE),
1624 MXCSR_ENTRY(DE),
1625 MXCSR_ENTRY(ZE),
1626 MXCSR_ENTRY(OE),
1627 MXCSR_ENTRY(UE),
1628 MXCSR_ENTRY(PE),
1629
1630 MXCSR_ENTRY(IM),
1631 MXCSR_ENTRY(DM),
1632 MXCSR_ENTRY(ZM),
1633 MXCSR_ENTRY(OM),
1634 MXCSR_ENTRY(UM),
1635 MXCSR_ENTRY(PM),
1636 { "6M", 64 },
1637 };
1638 for (size_t i = 0; i < RT_ELEMENTS(s_aFlags); i++)
1639 if (fMxcsr & s_aFlags[i].fFlag)
1640 cch += RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, " %s", s_aFlags[i].pszName);
1641
1642 RTStrPrintf(&pszBuf[cch], sizeof(g_aszBuf[0]) - cch, "");
1643 return pszBuf;
1644}
1645
1646
1647static const char *FormatR80(PCRTFLOAT80U pr80)
1648{
1649 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1650 RTStrFormatR80(pszBuf, sizeof(g_aszBuf[0]), pr80, 0, 0, RTSTR_F_SPECIAL);
1651 return pszBuf;
1652}
1653
1654
1655static const char *FormatR64(PCRTFLOAT64U pr64)
1656{
1657 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1658 RTStrFormatR64(pszBuf, sizeof(g_aszBuf[0]), pr64, 0, 0, RTSTR_F_SPECIAL);
1659 return pszBuf;
1660}
1661
1662
1663static const char *FormatR32(PCRTFLOAT32U pr32)
1664{
1665 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1666 RTStrFormatR32(pszBuf, sizeof(g_aszBuf[0]), pr32, 0, 0, RTSTR_F_SPECIAL);
1667 return pszBuf;
1668}
1669
1670
1671static const char *FormatD80(PCRTPBCD80U pd80)
1672{
 1673 /* There is only one indefinite encoding (same as for 80-bit
1674 floating point), so get it out of the way first: */
1675 if (RTPBCD80U_IS_INDEFINITE(pd80))
1676 return "Ind";
1677
1678 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1679 size_t off = 0;
1680 pszBuf[off++] = pd80->s.fSign ? '-' : '+';
1681 unsigned cBadDigits = 0;
1682 size_t iPair = RT_ELEMENTS(pd80->s.abPairs);
1683 while (iPair-- > 0)
1684 {
1685 static const char s_szDigits[] = "0123456789abcdef";
1686 static const uint8_t s_bBadDigits[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1 };
1687 pszBuf[off++] = s_szDigits[RTPBCD80U_HI_DIGIT(pd80->s.abPairs[iPair])];
1688 pszBuf[off++] = s_szDigits[RTPBCD80U_LO_DIGIT(pd80->s.abPairs[iPair])];
1689 cBadDigits += s_bBadDigits[RTPBCD80U_HI_DIGIT(pd80->s.abPairs[iPair])]
1690 + s_bBadDigits[RTPBCD80U_LO_DIGIT(pd80->s.abPairs[iPair])];
1691 }
1692 if (cBadDigits || pd80->s.uPad != 0)
1693 off += RTStrPrintf(&pszBuf[off], sizeof(g_aszBuf[0]) - off, "[%u,%#x]", cBadDigits, pd80->s.uPad);
1694 pszBuf[off] = '\0';
1695 return pszBuf;
1696}
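/* Illustrative example only (not from the original source, and assuming abPairs[0] holds the two least
   significant digits): a packed BCD value of +127 has fSign clear, abPairs[0] = 0x27, abPairs[1] = 0x01
   and the remaining pairs zero, so the loop above renders it as "+000000000000000127" (18 digits);
   the "[bad,pad]" suffix only appears for non-decimal digits or a non-zero padding byte. */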
1697
1698
1699#if 0
1700static const char *FormatI64(int64_t const *piVal)
1701{
1702 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1703 RTStrFormatU64(pszBuf, sizeof(g_aszBuf[0]), *piVal, 16, 0, 0, RTSTR_F_SPECIAL | RTSTR_F_VALSIGNED);
1704 return pszBuf;
1705}
1706#endif
1707
1708
1709static const char *FormatI32(int32_t const *piVal)
1710{
1711 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1712 RTStrFormatU32(pszBuf, sizeof(g_aszBuf[0]), *piVal, 16, 0, 0, RTSTR_F_SPECIAL | RTSTR_F_VALSIGNED);
1713 return pszBuf;
1714}
1715
1716
1717static const char *FormatI16(int16_t const *piVal)
1718{
1719 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1720 RTStrFormatU16(pszBuf, sizeof(g_aszBuf[0]), *piVal, 16, 0, 0, RTSTR_F_SPECIAL | RTSTR_F_VALSIGNED);
1721 return pszBuf;
1722}
1723
1724
1725static const char *FormatU128(PCRTUINT128U puVal)
1726{
1727 char *pszBuf = g_aszBuf[g_idxBuf++ % RT_ELEMENTS(g_aszBuf)];
1728 RTStrFormatU128(pszBuf, sizeof(g_aszBuf[0]), puVal, 16, 0, 0, RTSTR_F_SPECIAL);
1729 return pszBuf;
1730}
1731
1732
1733/*
1734 * Binary operations.
1735 */
1736TYPEDEF_SUBTEST_TYPE(BINU8_T, BINU8_TEST_T, PFNIEMAIMPLBINU8);
1737TYPEDEF_SUBTEST_TYPE(BINU16_T, BINU16_TEST_T, PFNIEMAIMPLBINU16);
1738TYPEDEF_SUBTEST_TYPE(BINU32_T, BINU32_TEST_T, PFNIEMAIMPLBINU32);
1739TYPEDEF_SUBTEST_TYPE(BINU64_T, BINU64_TEST_T, PFNIEMAIMPLBINU64);
1740
1741#ifdef TSTIEMAIMPL_WITH_GENERATOR
1742# define GEN_BINARY_TESTS(a_cBits, a_Fmt, a_TestType) \
1743static RTEXITCODE BinU ## a_cBits ## Generate(uint32_t cTests, const char * const * papszNameFmts) \
1744{ \
1745 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aBinU ## a_cBits); iFn++) \
1746 { \
1747 PFNIEMAIMPLBINU ## a_cBits const pfn = g_aBinU ## a_cBits[iFn].pfnNative \
1748 ? g_aBinU ## a_cBits[iFn].pfnNative : g_aBinU ## a_cBits[iFn].pfn; \
1749 IEMBINARYOUTPUT BinOut; \
1750 if ( g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
1751 && g_aBinU ## a_cBits[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
1752 continue; \
1753 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aBinU ## a_cBits[iFn]), RTEXITCODE_FAILURE); \
1754 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
1755 { \
1756 a_TestType Test; \
1757 Test.fEflIn = RandEFlags(); \
1758 Test.fEflOut = Test.fEflIn; \
1759 Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
1760 Test.uDstOut = Test.uDstIn; \
1761 Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
1762 if (g_aBinU ## a_cBits[iFn].uExtra) \
1763 Test.uSrcIn &= a_cBits - 1; /* Restrict bit index according to operand width */ \
1764 Test.uMisc = 0; \
1765 pfn(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut); \
1766 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
1767 } \
1768 for (uint32_t iTest = 0; iTest < g_aBinU ## a_cBits[iFn].cFixedTests; iTest++ ) \
1769 { \
1770 a_TestType Test; \
1771 Test.fEflIn = g_aBinU ## a_cBits[iFn].paFixedTests[iTest].fEflIn == UINT32_MAX ? RandEFlags() \
1772 : g_aBinU ## a_cBits[iFn].paFixedTests[iTest].fEflIn; \
1773 Test.fEflOut = Test.fEflIn; \
1774 Test.uDstIn = g_aBinU ## a_cBits[iFn].paFixedTests[iTest].uDstIn; \
1775 Test.uDstOut = Test.uDstIn; \
1776 Test.uSrcIn = g_aBinU ## a_cBits[iFn].paFixedTests[iTest].uSrcIn; \
1777 Test.uMisc = g_aBinU ## a_cBits[iFn].paFixedTests[iTest].uMisc; \
1778 pfn(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut); \
1779 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
1780 } \
1781 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
1782 } \
1783 return RTEXITCODE_SUCCESS; \
1784} \
1785DUMP_ALL_FN(BinU ## a_cBits, g_aBinU ## a_cBits)
1786
1787#else
1788# define GEN_BINARY_TESTS(a_cBits, a_Fmt, a_TestType)
1789#endif
1790
1791
1792/** Based on a quick probe run, guess how long to run the benchmark. */
1793static uint32_t EstimateIterations(uint32_t cProbeIterations, uint64_t cNsProbe)
1794{
1795 uint64_t cPicoSecPerIteration = cNsProbe * 1000 / cProbeIterations;
1796 uint64_t cIterations = g_cPicoSecBenchmark / cPicoSecPerIteration;
1797 if (cIterations > _2G)
1798 return _2G;
1799 if (cIterations < _4K)
1800 return _4K;
1801 return RT_ALIGN_32((uint32_t)cIterations, _4K);
1802}
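/* Illustrative numbers (assumed, not from the original source): if the 64K-iteration probe takes
   1 000 000 ns, the estimated cost is 1 000 000 * 1000 / 65536 ~= 15258 ps per iteration; with a
   benchmark budget of e.g. 5 seconds (g_cPicoSecBenchmark = 5 * 10^12 ps) that gives roughly
   327.7 million iterations, which lies inside the [4K, 2G] clamp and is then rounded to a 4K
   multiple by RT_ALIGN_32. */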
1803
1804
1805#define TEST_BINARY_OPS(a_cBits, a_uType, a_Fmt, a_TestType, a_aSubTests) \
1806GEN_BINARY_TESTS(a_cBits, a_Fmt, a_TestType) \
1807\
1808static uint64_t BinU ## a_cBits ## Bench(uint32_t cIterations, PFNIEMAIMPLBINU ## a_cBits pfn, a_TestType const *pEntry) \
1809{ \
1810 uint32_t const fEflIn = pEntry->fEflIn; \
1811 a_uType const uDstIn = pEntry->uDstIn; \
1812 a_uType const uSrcIn = pEntry->uSrcIn; \
1813 cIterations /= 4; \
1814 RTThreadYield(); \
1815 uint64_t const nsStart = RTTimeNanoTS(); \
1816 for (uint32_t i = 0; i < cIterations; i++) \
1817 { \
1818 uint32_t fBenchEfl = fEflIn; \
1819 a_uType uBenchDst = uDstIn; \
1820 pfn(&uBenchDst, uSrcIn, &fBenchEfl); \
1821 \
1822 fBenchEfl = fEflIn; \
1823 uBenchDst = uDstIn; \
1824 pfn(&uBenchDst, uSrcIn, &fBenchEfl); \
1825 \
1826 fBenchEfl = fEflIn; \
1827 uBenchDst = uDstIn; \
1828 pfn(&uBenchDst, uSrcIn, &fBenchEfl); \
1829 \
1830 fBenchEfl = fEflIn; \
1831 uBenchDst = uDstIn; \
1832 pfn(&uBenchDst, uSrcIn, &fBenchEfl); \
1833 } \
1834 return RTTimeNanoTS() - nsStart; \
1835} \
1836\
1837static void BinU ## a_cBits ## Test(void) \
1838{ \
1839 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
1840 { \
1841 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
1842 continue; \
1843 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
1844 uint32_t const cTests = a_aSubTests[iFn].cTests; \
1845 PFNIEMAIMPLBINU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
1846 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
1847 if (!cTests) { RTTestSkipped(g_hTest, "no tests"); continue; } \
1848 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
1849 { \
1850 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
1851 { \
1852 uint32_t fEfl = paTests[iTest].fEflIn; \
1853 a_uType uDst = paTests[iTest].uDstIn; \
1854 pfn(&uDst, paTests[iTest].uSrcIn, &fEfl); \
1855 if ( uDst != paTests[iTest].uDstOut \
1856 || fEfl != paTests[iTest].fEflOut ) \
1857 RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s - %s\n", \
1858 iTest, !iVar ? "" : "/n", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
1859 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
1860 EFlagsDiff(fEfl, paTests[iTest].fEflOut), \
1861 uDst == paTests[iTest].uDstOut ? "eflags" : fEfl == paTests[iTest].fEflOut ? "dst" : "both"); \
1862 else \
1863 { \
1864 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
1865 *g_pfEfl = paTests[iTest].fEflIn; \
1866 pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, g_pfEfl); \
1867 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
1868 RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
1869 } \
1870 } \
1871 \
1872 /* Benchmark if all succeeded. */ \
1873 if (g_cPicoSecBenchmark && RTTestSubErrorCount(g_hTest) == 0) \
1874 { \
1875 uint32_t const iTest = cTests / 2; \
1876 uint32_t const cIterations = EstimateIterations(_64K, BinU ## a_cBits ## Bench(_64K, pfn, &paTests[iTest])); \
1877 uint64_t const cNsRealRun = BinU ## a_cBits ## Bench(cIterations, pfn, &paTests[iTest]); \
1878 RTTestValueF(g_hTest, cNsRealRun * 1000 / cIterations, RTTESTUNIT_PS_PER_CALL, \
1879 "%s%s", a_aSubTests[iFn].pszName, iVar ? "-native" : ""); \
1880 } \
1881 \
1882 /* Next variation is native. */ \
1883 pfn = a_aSubTests[iFn].pfnNative; \
1884 } \
1885 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
1886 } \
1887}
1888
1889
1890/*
1891 * 8-bit binary operations.
1892 */
1893static BINU8_T g_aBinU8[] =
1894{
1895 ENTRY_BIN(add_u8),
1896 ENTRY_BIN(add_u8_locked),
1897 ENTRY_BIN(adc_u8),
1898 ENTRY_BIN(adc_u8_locked),
1899 ENTRY_BIN(sub_u8),
1900 ENTRY_BIN(sub_u8_locked),
1901 ENTRY_BIN(sbb_u8),
1902 ENTRY_BIN(sbb_u8_locked),
1903 ENTRY_BIN(or_u8),
1904 ENTRY_BIN(or_u8_locked),
1905 ENTRY_BIN(xor_u8),
1906 ENTRY_BIN(xor_u8_locked),
1907 ENTRY_BIN(and_u8),
1908 ENTRY_BIN(and_u8_locked),
1909 ENTRY_BIN_PFN_CAST(cmp_u8, PFNIEMAIMPLBINU8),
1910 ENTRY_BIN_PFN_CAST(test_u8, PFNIEMAIMPLBINU8),
1911};
1912TEST_BINARY_OPS(8, uint8_t, "%#04x", BINU8_TEST_T, g_aBinU8)
1913
1914
1915/*
1916 * 16-bit binary operations.
1917 */
1918#ifdef TSTIEMAIMPL_WITH_GENERATOR
1919static const BINU16_TEST_T g_aFixedTests_add_u16[] =
1920{
1921    /* efl in, efl out, uDstIn, uDstOut, uSrcIn, uMisc */
1922 { UINT32_MAX, 0, 1, 0, UINT16_MAX, 0 },
1923};
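/* The single fixed add_u16 case above hits the wrap-around boundary: 1 + 0xffff = 0x10000, which
   truncates to zero and should come back with CF, AF, ZF and PF set; the UINT32_MAX in the eflags-in
   column tells the generator to pick random starting flags. The u32/u64 tables below follow the
   same pattern with wider operands. */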
1924#endif
1925static BINU16_T g_aBinU16[] =
1926{
1927 ENTRY_BIN_FIX(add_u16),
1928 ENTRY_BIN(add_u16_locked),
1929 ENTRY_BIN(adc_u16),
1930 ENTRY_BIN(adc_u16_locked),
1931 ENTRY_BIN(sub_u16),
1932 ENTRY_BIN(sub_u16_locked),
1933 ENTRY_BIN(sbb_u16),
1934 ENTRY_BIN(sbb_u16_locked),
1935 ENTRY_BIN(or_u16),
1936 ENTRY_BIN(or_u16_locked),
1937 ENTRY_BIN(xor_u16),
1938 ENTRY_BIN(xor_u16_locked),
1939 ENTRY_BIN(and_u16),
1940 ENTRY_BIN(and_u16_locked),
1941 ENTRY_BIN_PFN_CAST(cmp_u16, PFNIEMAIMPLBINU16),
1942 ENTRY_BIN_PFN_CAST(test_u16, PFNIEMAIMPLBINU16),
1943 ENTRY_BIN_PFN_CAST_EX(bt_u16, PFNIEMAIMPLBINU16, 1),
1944 ENTRY_BIN_EX(btc_u16, 1),
1945 ENTRY_BIN_EX(btc_u16_locked, 1),
1946 ENTRY_BIN_EX(btr_u16, 1),
1947 ENTRY_BIN_EX(btr_u16_locked, 1),
1948 ENTRY_BIN_EX(bts_u16, 1),
1949 ENTRY_BIN_EX(bts_u16_locked, 1),
1950 ENTRY_BIN_AMD( bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1951 ENTRY_BIN_INTEL(bsf_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1952 ENTRY_BIN_AMD( bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1953 ENTRY_BIN_INTEL(bsr_u16, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1954 ENTRY_BIN_AMD( imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
1955 ENTRY_BIN_INTEL(imul_two_u16, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
1956 ENTRY_BIN(arpl),
1957};
1958TEST_BINARY_OPS(16, uint16_t, "%#06x", BINU16_TEST_T, g_aBinU16)
1959
1960
1961/*
1962 * 32-bit binary operations.
1963 */
1964#ifdef TSTIEMAIMPL_WITH_GENERATOR
1965static const BINU32_TEST_T g_aFixedTests_add_u32[] =
1966{
1967    /* efl in, efl out, uDstIn, uDstOut, uSrcIn, uMisc */
1968 { UINT32_MAX, 0, 1, 0, UINT32_MAX, 0 },
1969};
1970#endif
1971static BINU32_T g_aBinU32[] =
1972{
1973 ENTRY_BIN_FIX(add_u32),
1974 ENTRY_BIN(add_u32_locked),
1975 ENTRY_BIN(adc_u32),
1976 ENTRY_BIN(adc_u32_locked),
1977 ENTRY_BIN(sub_u32),
1978 ENTRY_BIN(sub_u32_locked),
1979 ENTRY_BIN(sbb_u32),
1980 ENTRY_BIN(sbb_u32_locked),
1981 ENTRY_BIN(or_u32),
1982 ENTRY_BIN(or_u32_locked),
1983 ENTRY_BIN(xor_u32),
1984 ENTRY_BIN(xor_u32_locked),
1985 ENTRY_BIN(and_u32),
1986 ENTRY_BIN(and_u32_locked),
1987 ENTRY_BIN_PFN_CAST(cmp_u32, PFNIEMAIMPLBINU32),
1988 ENTRY_BIN_PFN_CAST(test_u32, PFNIEMAIMPLBINU32),
1989 ENTRY_BIN_PFN_CAST_EX(bt_u32, PFNIEMAIMPLBINU32, 1),
1990 ENTRY_BIN_EX(btc_u32, 1),
1991 ENTRY_BIN_EX(btc_u32_locked, 1),
1992 ENTRY_BIN_EX(btr_u32, 1),
1993 ENTRY_BIN_EX(btr_u32_locked, 1),
1994 ENTRY_BIN_EX(bts_u32, 1),
1995 ENTRY_BIN_EX(bts_u32_locked, 1),
1996 ENTRY_BIN_AMD( bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1997 ENTRY_BIN_INTEL(bsf_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1998 ENTRY_BIN_AMD( bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
1999 ENTRY_BIN_INTEL(bsr_u32, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
2000 ENTRY_BIN_AMD( imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
2001 ENTRY_BIN_INTEL(imul_two_u32, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
2002 ENTRY_BIN(adcx_u32),
2003 ENTRY_BIN(adox_u32),
2004};
2005TEST_BINARY_OPS(32, uint32_t, "%#010RX32", BINU32_TEST_T, g_aBinU32)
2006
2007
2008/*
2009 * 64-bit binary operations.
2010 */
2011#ifdef TSTIEMAIMPL_WITH_GENERATOR
2012static const BINU64_TEST_T g_aFixedTests_add_u64[] =
2013{
2014    /* efl in, efl out, uDstIn, uDstOut, uSrcIn, uMisc */
2015 { UINT32_MAX, 0, 1, 0, UINT64_MAX, 0 },
2016};
2017#endif
2018static BINU64_T g_aBinU64[] =
2019{
2020 ENTRY_BIN_FIX(add_u64),
2021 ENTRY_BIN(add_u64_locked),
2022 ENTRY_BIN(adc_u64),
2023 ENTRY_BIN(adc_u64_locked),
2024 ENTRY_BIN(sub_u64),
2025 ENTRY_BIN(sub_u64_locked),
2026 ENTRY_BIN(sbb_u64),
2027 ENTRY_BIN(sbb_u64_locked),
2028 ENTRY_BIN(or_u64),
2029 ENTRY_BIN(or_u64_locked),
2030 ENTRY_BIN(xor_u64),
2031 ENTRY_BIN(xor_u64_locked),
2032 ENTRY_BIN(and_u64),
2033 ENTRY_BIN(and_u64_locked),
2034 ENTRY_BIN_PFN_CAST(cmp_u64, PFNIEMAIMPLBINU64),
2035 ENTRY_BIN_PFN_CAST(test_u64, PFNIEMAIMPLBINU64),
2036 ENTRY_BIN_PFN_CAST_EX(bt_u64, PFNIEMAIMPLBINU64, 1),
2037 ENTRY_BIN_EX(btc_u64, 1),
2038 ENTRY_BIN_EX(btc_u64_locked, 1),
2039 ENTRY_BIN_EX(btr_u64, 1),
2040 ENTRY_BIN_EX(btr_u64_locked, 1),
2041 ENTRY_BIN_EX(bts_u64, 1),
2042 ENTRY_BIN_EX(bts_u64_locked, 1),
2043 ENTRY_BIN_AMD( bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
2044 ENTRY_BIN_INTEL(bsf_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
2045 ENTRY_BIN_AMD( bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
2046 ENTRY_BIN_INTEL(bsr_u64, X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_OF),
2047 ENTRY_BIN_AMD( imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
2048 ENTRY_BIN_INTEL(imul_two_u64, X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF),
2049 ENTRY_BIN(adcx_u64),
2050 ENTRY_BIN(adox_u64),
2051};
2052TEST_BINARY_OPS(64, uint64_t, "%#018RX64", BINU64_TEST_T, g_aBinU64)
2053
2054
2055/*
2056 * XCHG
2057 */
2058static void XchgTest(void)
2059{
2060 if (!SubTestAndCheckIfEnabled("xchg"))
2061 return;
2062 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2063 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2064 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2065 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXCHGU64,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2066
2067 static struct
2068 {
2069 uint8_t cb; uint64_t fMask;
2070 union
2071 {
2072 uintptr_t pfn;
2073 FNIEMAIMPLXCHGU8 *pfnU8;
2074 FNIEMAIMPLXCHGU16 *pfnU16;
2075 FNIEMAIMPLXCHGU32 *pfnU32;
2076 FNIEMAIMPLXCHGU64 *pfnU64;
2077 } u;
2078 }
2079 s_aXchgWorkers[] =
2080 {
2081 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_locked } },
2082 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_locked } },
2083 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_locked } },
2084 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_locked } },
2085 { 1, UINT8_MAX, { (uintptr_t)iemAImpl_xchg_u8_unlocked } },
2086 { 2, UINT16_MAX, { (uintptr_t)iemAImpl_xchg_u16_unlocked } },
2087 { 4, UINT32_MAX, { (uintptr_t)iemAImpl_xchg_u32_unlocked } },
2088 { 8, UINT64_MAX, { (uintptr_t)iemAImpl_xchg_u64_unlocked } },
2089 };
2090 for (size_t i = 0; i < RT_ELEMENTS(s_aXchgWorkers); i++)
2091 {
2092 RTUINT64U uIn1, uIn2, uMem, uDst;
2093 uMem.u = uIn1.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
2094 uDst.u = uIn2.u = RTRandU64Ex(0, s_aXchgWorkers[i].fMask);
2095 if (uIn1.u == uIn2.u)
2096 uDst.u = uIn2.u = ~uIn2.u;
2097
2098 switch (s_aXchgWorkers[i].cb)
2099 {
2100 case 1:
2101 s_aXchgWorkers[i].u.pfnU8(g_pu8, g_pu8Two);
2102 s_aXchgWorkers[i].u.pfnU8(&uMem.au8[0], &uDst.au8[0]);
2103 break;
2104 case 2:
2105 s_aXchgWorkers[i].u.pfnU16(g_pu16, g_pu16Two);
2106 s_aXchgWorkers[i].u.pfnU16(&uMem.Words.w0, &uDst.Words.w0);
2107 break;
2108 case 4:
2109 s_aXchgWorkers[i].u.pfnU32(g_pu32, g_pu32Two);
2110 s_aXchgWorkers[i].u.pfnU32(&uMem.DWords.dw0, &uDst.DWords.dw0);
2111 break;
2112 case 8:
2113 s_aXchgWorkers[i].u.pfnU64(g_pu64, g_pu64Two);
2114 s_aXchgWorkers[i].u.pfnU64(&uMem.u, &uDst.u);
2115 break;
2116 default: RTTestFailed(g_hTest, "%d\n", s_aXchgWorkers[i].cb); break;
2117 }
2118
2119 if (uMem.u != uIn2.u || uDst.u != uIn1.u)
2120 RTTestFailed(g_hTest, "i=%u: %#RX64, %#RX64 -> %#RX64, %#RX64\n", i, uIn1.u, uIn2.u, uMem.u, uDst.u);
2121 }
2122}
2123
2124
2125/*
2126 * XADD
2127 */
2128static void XaddTest(void)
2129{
2130#define TEST_XADD(a_cBits, a_Type, a_Fmt) do { \
2131 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLXADDU ## a_cBits, (a_Type *, a_Type *, uint32_t *)); \
2132 static struct \
2133 { \
2134 const char * const pszName; \
2135 FNIEMAIMPLXADDU ## a_cBits * const pfn; \
2136 void const * const pvCompressedTests; \
2137 uint32_t const * const pcbCompressedTests; \
2138 BINU ## a_cBits ## _TEST_T const *paTests; \
2139 uint32_t cTests; \
2140 IEMTESTENTRYINFO Info; \
2141 } s_aFuncs[] = \
2142 { \
2143 { "xadd_u" # a_cBits, iemAImpl_xadd_u ## a_cBits, \
2144 g_abTests_add_u ## a_cBits, &g_cbTests_add_u ## a_cBits }, \
2145        { "xadd_u" # a_cBits "_locked", iemAImpl_xadd_u ## a_cBits ## _locked, \
2146 g_abTests_add_u ## a_cBits, &g_cbTests_add_u ## a_cBits }, \
2147 }; \
2148 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
2149 { \
2150 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(s_aFuncs[iFn])) continue; \
2151 BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
2152 uint32_t const cTests = s_aFuncs[iFn].cTests; \
2153 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
2154 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
2155 { \
2156 uint32_t fEfl = paTests[iTest].fEflIn; \
2157 a_Type uSrc = paTests[iTest].uSrcIn; \
2158 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
2159 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uSrc, &fEfl); \
2160 if ( fEfl != paTests[iTest].fEflOut \
2161 || *g_pu ## a_cBits != paTests[iTest].uDstOut \
2162 || uSrc != paTests[iTest].uDstIn) \
2163 RTTestFailed(g_hTest, "%s/#%u: efl=%#08x dst=" a_Fmt " src=" a_Fmt " -> efl=%#08x dst=" a_Fmt " src=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
2164 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn, \
2165 fEfl, *g_pu ## a_cBits, uSrc, paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].uDstIn, \
2166 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
2167 } \
2168 FREE_DECOMPRESSED_TESTS(s_aFuncs[iFn]); \
2169 } \
2170 } while(0)
2171 TEST_XADD(8, uint8_t, "%#04x");
2172 TEST_XADD(16, uint16_t, "%#06x");
2173 TEST_XADD(32, uint32_t, "%#010RX32");
2174    TEST_XADD(64, uint64_t, "%#018RX64");
2175}
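#if 0 /* Illustrative sketch only, not used by the tests: the XADD semantics that let XaddTest reuse
         the add_u* vectors. The source operand receives the old destination value, which is why the
         loop above compares uSrc against uDstIn; the sum and EFLAGS match a plain ADD (flag updates
         are omitted from this sketch). */
static void XaddU32Sketch(uint32_t *puDst, uint32_t *puSrc)
{
    uint32_t const uOldDst = *puDst;
    *puDst = uOldDst + *puSrc;  /* same result the add_u32 test data was generated with */
    *puSrc = uOldDst;           /* the exchange part: the old destination ends up in the source */
}
#endif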
2176
2177
2178/*
2179 * CMPXCHG
2180 */
2181
2182static void CmpXchgTest(void)
2183{
2184#define TEST_CMPXCHG(a_cBits, a_Type, a_Fmt) do {\
2185 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHGU ## a_cBits, (a_Type *, a_Type *, a_Type, uint32_t *)); \
2186 static struct \
2187 { \
2188 const char * const pszName; \
2189 FNIEMAIMPLCMPXCHGU ## a_cBits * const pfn; \
2190 PFNIEMAIMPLBINU ## a_cBits const pfnSub; \
2191 void const * const pvCompressedTests; \
2192 uint32_t const * const pcbCompressedTests; \
2193 BINU ## a_cBits ## _TEST_T const *paTests; \
2194 uint32_t cTests; \
2195 IEMTESTENTRYINFO Info; \
2196 } s_aFuncs[] = \
2197 { \
2198 { "cmpxchg_u" # a_cBits, iemAImpl_cmpxchg_u ## a_cBits, iemAImpl_sub_u ## a_cBits, \
2199 g_abTests_cmp_u ## a_cBits, &g_cbTests_cmp_u ## a_cBits }, \
2200 { "cmpxchg_u" # a_cBits "_locked", iemAImpl_cmpxchg_u ## a_cBits ## _locked, iemAImpl_sub_u ## a_cBits, \
2201 g_abTests_cmp_u ## a_cBits, &g_cbTests_cmp_u ## a_cBits }, \
2202 }; \
2203 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++) \
2204 { \
2205 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(s_aFuncs[iFn])) continue; \
2206 BINU ## a_cBits ## _TEST_T const * const paTests = s_aFuncs[iFn].paTests; \
2207 uint32_t const cTests = s_aFuncs[iFn].cTests; \
2208 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
2209 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
2210 { \
2211 /* as is (99% likely to be negative). */ \
2212 uint32_t fEfl = paTests[iTest].fEflIn; \
2213 a_Type const uNew = paTests[iTest].uSrcIn + 0x42; \
2214 a_Type uA = paTests[iTest].uDstIn; \
2215 *g_pu ## a_cBits = paTests[iTest].uSrcIn; \
2216 a_Type const uExpect = uA != paTests[iTest].uSrcIn ? paTests[iTest].uSrcIn : uNew; \
2217 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
2218 if ( fEfl != paTests[iTest].fEflOut \
2219 || *g_pu ## a_cBits != uExpect \
2220 || uA != paTests[iTest].uSrcIn) \
2221 RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
2222 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uSrcIn, paTests[iTest].uDstIn, \
2223 uNew, fEfl, *g_pu ## a_cBits, uA, paTests[iTest].fEflOut, uExpect, paTests[iTest].uSrcIn, \
2224 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
2225 /* positive */ \
2226 uint32_t fEflExpect = paTests[iTest].fEflIn; \
2227 uA = paTests[iTest].uDstIn; \
2228 s_aFuncs[iFn].pfnSub(&uA, uA, &fEflExpect); \
2229 fEfl = paTests[iTest].fEflIn; \
2230 uA = paTests[iTest].uDstIn; \
2231 *g_pu ## a_cBits = uA; \
2232 s_aFuncs[iFn].pfn(g_pu ## a_cBits, &uA, uNew, &fEfl); \
2233 if ( fEfl != fEflExpect \
2234 || *g_pu ## a_cBits != uNew \
2235 || uA != paTests[iTest].uDstIn) \
2236 RTTestFailed(g_hTest, "%s/#%ua: efl=%#08x dst=" a_Fmt " cmp=" a_Fmt " new=" a_Fmt " -> efl=%#08x dst=" a_Fmt " old=" a_Fmt ", expected %#08x, " a_Fmt ", " a_Fmt "%s\n", \
2237 s_aFuncs[iFn].pszName, iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uDstIn, \
2238 uNew, fEfl, *g_pu ## a_cBits, uA, fEflExpect, uNew, paTests[iTest].uDstIn, \
2239 EFlagsDiff(fEfl, fEflExpect)); \
2240 } \
2241 FREE_DECOMPRESSED_TESTS(s_aFuncs[iFn]); \
2242 } \
2243 } while(0)
2244 TEST_CMPXCHG(8, uint8_t, "%#04RX8");
2245 TEST_CMPXCHG(16, uint16_t, "%#06x");
2246 TEST_CMPXCHG(32, uint32_t, "%#010RX32");
2247#if ARCH_BITS != 32 /* calling convention issue, skipping as it's an unsupported host */
2248    TEST_CMPXCHG(64, uint64_t, "%#018RX64");
2249#endif
2250}
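#if 0 /* Illustrative sketch only, not used by the tests: the CMPXCHG semantics the two passes above
         rely on. The first pass is almost always a mismatch (the accumulator receives the memory
         value, EFLAGS as per CMP), while the second pass primes memory to equal the accumulator so
         the new value must be stored and EFLAGS must match a SUB of two equal operands (flag updates
         are omitted from this sketch). */
static bool CmpXchgU32Sketch(uint32_t *puMem, uint32_t *puAccumulator, uint32_t uNew)
{
    if (*puMem == *puAccumulator)
    {
        *puMem = uNew;              /* match: store the new value (ZF=1 on real hardware) */
        return true;
    }
    *puAccumulator = *puMem;        /* mismatch: accumulator receives the current memory value (ZF=0) */
    return false;
}
#endif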
2251
2252static void CmpXchg8bTest(void)
2253{
2254 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG8B,(uint64_t *, PRTUINT64U, PRTUINT64U, uint32_t *));
2255 static struct
2256 {
2257 const char *pszName;
2258 FNIEMAIMPLCMPXCHG8B *pfn;
2259 } const s_aFuncs[] =
2260 {
2261 { "cmpxchg8b", iemAImpl_cmpxchg8b },
2262 { "cmpxchg8b_locked", iemAImpl_cmpxchg8b_locked },
2263 };
2264 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
2265 {
2266 if (!SubTestAndCheckIfEnabled(s_aFuncs[iFn].pszName))
2267 continue;
2268 for (uint32_t iTest = 0; iTest < 4; iTest += 2)
2269 {
2270 uint64_t const uOldValue = RandU64();
2271 uint64_t const uNewValue = RandU64();
2272
2273 /* positive test. */
2274 RTUINT64U uA, uB;
2275 uB.u = uNewValue;
2276 uA.u = uOldValue;
2277 *g_pu64 = uOldValue;
2278 uint32_t fEflIn = RandEFlags();
2279 uint32_t fEfl = fEflIn;
2280 s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
2281 if ( fEfl != (fEflIn | X86_EFL_ZF)
2282 || *g_pu64 != uNewValue
2283 || uA.u != uOldValue)
2284 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
2285 iTest, fEflIn, uOldValue, uOldValue, uNewValue,
2286 fEfl, *g_pu64, uA.u,
2287 (fEflIn | X86_EFL_ZF), uNewValue, uOldValue, EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
2288 RTTEST_CHECK(g_hTest, uB.u == uNewValue);
2289
2290 /* negative */
2291 uint64_t const uExpect = ~uOldValue;
2292 *g_pu64 = uExpect;
2293 uA.u = uOldValue;
2294 uB.u = uNewValue;
2295 fEfl = fEflIn = RandEFlags();
2296 s_aFuncs[iFn].pfn(g_pu64, &uA, &uB, &fEfl);
2297 if ( fEfl != (fEflIn & ~X86_EFL_ZF)
2298 || *g_pu64 != uExpect
2299 || uA.u != uExpect)
2300 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64 cmp=%#018RX64 new=%#018RX64\n -> efl=%#08x dst=%#018RX64 old=%#018RX64,\n wanted %#08x, %#018RX64, %#018RX64%s\n",
2301 iTest + 1, fEflIn, uExpect, uOldValue, uNewValue,
2302 fEfl, *g_pu64, uA.u,
2303 (fEflIn & ~X86_EFL_ZF), uExpect, uExpect, EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
2304 RTTEST_CHECK(g_hTest, uB.u == uNewValue);
2305 }
2306 }
2307}
2308
2309static void CmpXchg16bTest(void)
2310{
2311 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCMPXCHG16B,(PRTUINT128U, PRTUINT128U, PRTUINT128U, uint32_t *));
2312 static struct
2313 {
2314 const char *pszName;
2315 FNIEMAIMPLCMPXCHG16B *pfn;
2316 } const s_aFuncs[] =
2317 {
2318 { "cmpxchg16b", iemAImpl_cmpxchg16b },
2319 { "cmpxchg16b_locked", iemAImpl_cmpxchg16b_locked },
2320#if !defined(RT_ARCH_ARM64)
2321 { "cmpxchg16b_fallback", iemAImpl_cmpxchg16b_fallback },
2322#endif
2323 };
2324 for (size_t iFn = 0; iFn < RT_ELEMENTS(s_aFuncs); iFn++)
2325 {
2326 if (!SubTestAndCheckIfEnabled(s_aFuncs[iFn].pszName))
2327 continue;
2328#if !defined(IEM_WITHOUT_ASSEMBLY) && defined(RT_ARCH_AMD64)
2329 if (!(ASMCpuId_ECX(1) & X86_CPUID_FEATURE_ECX_CX16))
2330 {
2331 RTTestSkipped(g_hTest, "no hardware cmpxchg16b");
2332 continue;
2333 }
2334#endif
2335 for (uint32_t iTest = 0; iTest < 4; iTest += 2)
2336 {
2337 RTUINT128U const uOldValue = RandU128();
2338 RTUINT128U const uNewValue = RandU128();
2339
2340 /* positive test. */
2341 RTUINT128U uA, uB;
2342 uB = uNewValue;
2343 uA = uOldValue;
2344 *g_pu128 = uOldValue;
2345 uint32_t fEflIn = RandEFlags();
2346 uint32_t fEfl = fEflIn;
2347 s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
2348 if ( fEfl != (fEflIn | X86_EFL_ZF)
2349 || g_pu128->s.Lo != uNewValue.s.Lo
2350 || g_pu128->s.Hi != uNewValue.s.Hi
2351 || uA.s.Lo != uOldValue.s.Lo
2352 || uA.s.Hi != uOldValue.s.Hi)
2353 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
2354 " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
2355 " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
2356 iTest, fEflIn, uOldValue.s.Hi, uOldValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
2357 fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
2358 (fEflIn | X86_EFL_ZF), uNewValue.s.Hi, uNewValue.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo,
2359 EFlagsDiff(fEfl, fEflIn | X86_EFL_ZF));
2360 RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);
2361
2362 /* negative */
2363 RTUINT128U const uExpect = RTUINT128_INIT(~uOldValue.s.Hi, ~uOldValue.s.Lo);
2364 *g_pu128 = uExpect;
2365 uA = uOldValue;
2366 uB = uNewValue;
2367 fEfl = fEflIn = RandEFlags();
2368 s_aFuncs[iFn].pfn(g_pu128, &uA, &uB, &fEfl);
2369 if ( fEfl != (fEflIn & ~X86_EFL_ZF)
2370 || g_pu128->s.Lo != uExpect.s.Lo
2371 || g_pu128->s.Hi != uExpect.s.Hi
2372 || uA.s.Lo != uExpect.s.Lo
2373 || uA.s.Hi != uExpect.s.Hi)
2374 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=%#018RX64'%016RX64 cmp=%#018RX64'%016RX64 new=%#018RX64'%016RX64\n"
2375 " -> efl=%#08x dst=%#018RX64'%016RX64 old=%#018RX64'%016RX64,\n"
2376 " wanted %#08x, %#018RX64'%016RX64, %#018RX64'%016RX64%s\n",
2377 iTest + 1, fEflIn, uExpect.s.Hi, uExpect.s.Lo, uOldValue.s.Hi, uOldValue.s.Lo, uNewValue.s.Hi, uNewValue.s.Lo,
2378 fEfl, g_pu128->s.Hi, g_pu128->s.Lo, uA.s.Hi, uA.s.Lo,
2379 (fEflIn & ~X86_EFL_ZF), uExpect.s.Hi, uExpect.s.Lo, uExpect.s.Hi, uExpect.s.Lo,
2380 EFlagsDiff(fEfl, fEflIn & ~X86_EFL_ZF));
2381 RTTEST_CHECK(g_hTest, uB.s.Lo == uNewValue.s.Lo && uB.s.Hi == uNewValue.s.Hi);
2382 }
2383 }
2384}
2385
2386
2387/*
2388 * Double shifts.
2389 *
2390 * Note! We use BINUxx_TEST_T with the shift value in the uMisc field.
2391 */
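#if 0 /* Illustrative sketch only, not used by the tests: the value a shld_u32 worker is expected to
         produce. EFLAGS are deliberately not modelled here, since the OF/CF behaviour differs between
         AMD and Intel, which is exactly what the separate AMD/Intel variants below capture. For 32-bit
         operands the CPU masks the count to 5 bits, which the generator exercises by letting uMisc go
         well past 31. */
static uint32_t ShldU32Sketch(uint32_t uDst, uint32_t uSrc, uint8_t cShift)
{
    cShift &= 31;                   /* the count is masked modulo the operand width */
    if (!cShift)
        return uDst;                /* a zero count leaves the destination unchanged */
    return (uDst << cShift) | (uSrc >> (32 - cShift));
}
#endif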
2392#ifdef TSTIEMAIMPL_WITH_GENERATOR
2393# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
2394static RTEXITCODE ShiftDblU ## a_cBits ## Generate(uint32_t cTests, const char * const * papszNameFmts) \
2395{ \
2396 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
2397 { \
2398 if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
2399 && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
2400 continue; \
2401 IEMBINARYOUTPUT BinOut; \
2402 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
2403 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2404 { \
2405 a_TestType Test; \
2406 Test.fEflIn = RandEFlags(); \
2407 Test.fEflOut = Test.fEflIn; \
2408 Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
2409 Test.uDstOut = Test.uDstIn; \
2410 Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
2411 Test.uMisc = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
2412 a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, Test.uMisc, &Test.fEflOut); \
2413 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
2414 } \
2415 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
2416 } \
2417 return RTEXITCODE_SUCCESS; \
2418} \
2419DUMP_ALL_FN(ShiftDblU ## a_cBits, a_aSubTests)
2420
2421#else
2422# define GEN_SHIFT_DBL(a_cBits, a_Fmt, a_TestType, a_aSubTests)
2423#endif
2424
2425#define TEST_SHIFT_DBL(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
2426TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLSHIFTDBLU ## a_cBits); \
2427\
2428static a_SubTestType a_aSubTests[] = \
2429{ \
2430 ENTRY_BIN_AMD(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
2431 ENTRY_BIN_INTEL(shld_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
2432 ENTRY_BIN_AMD(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
2433 ENTRY_BIN_INTEL(shrd_u ## a_cBits, X86_EFL_OF | X86_EFL_CF), \
2434}; \
2435\
2436GEN_SHIFT_DBL(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
2437\
2438static void ShiftDblU ## a_cBits ## Test(void) \
2439{ \
2440 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
2441 { \
2442 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
2443 continue; \
2444 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
2445 uint32_t const cTests = a_aSubTests[iFn].cTests; \
2446 PFNIEMAIMPLSHIFTDBLU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
2447 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
2448 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
2449 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
2450 { \
2451 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2452 { \
2453 uint32_t fEfl = paTests[iTest].fEflIn; \
2454 a_Type uDst = paTests[iTest].uDstIn; \
2455 pfn(&uDst, paTests[iTest].uSrcIn, paTests[iTest].uMisc, &fEfl); \
2456 if ( uDst != paTests[iTest].uDstOut \
2457 || fEfl != paTests[iTest].fEflOut) \
2458 RTTestFailed(g_hTest, "#%03u%s: efl=%#08x dst=" a_Fmt " src=" a_Fmt " shift=%-2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s%s\n", \
2459 iTest, iVar == 0 ? "" : "/n", paTests[iTest].fEflIn, \
2460 paTests[iTest].uDstIn, paTests[iTest].uSrcIn, (unsigned)paTests[iTest].uMisc, \
2461 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
2462 EFlagsDiff(fEfl, paTests[iTest].fEflOut), uDst == paTests[iTest].uDstOut ? "" : " dst!"); \
2463 else \
2464 { \
2465 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
2466 *g_pfEfl = paTests[iTest].fEflIn; \
2467 pfn(g_pu ## a_cBits, paTests[iTest].uSrcIn, paTests[iTest].uMisc, g_pfEfl); \
2468 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
2469 RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
2470 } \
2471 } \
2472 pfn = a_aSubTests[iFn].pfnNative; \
2473 } \
2474 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
2475 } \
2476}
2477TEST_SHIFT_DBL(16, uint16_t, "%#06RX16", BINU16_TEST_T, SHIFT_DBL_U16_T, g_aShiftDblU16)
2478TEST_SHIFT_DBL(32, uint32_t, "%#010RX32", BINU32_TEST_T, SHIFT_DBL_U32_T, g_aShiftDblU32)
2479TEST_SHIFT_DBL(64, uint64_t, "%#018RX64", BINU64_TEST_T, SHIFT_DBL_U64_T, g_aShiftDblU64)
2480
2481#ifdef TSTIEMAIMPL_WITH_GENERATOR
2482static RTEXITCODE ShiftDblGenerate(uint32_t cTests, const char * const * papszNameFmts)
2483{
2484 RTEXITCODE rcExit = ShiftDblU16Generate(cTests, papszNameFmts);
2485 if (rcExit == RTEXITCODE_SUCCESS)
2486 rcExit = ShiftDblU32Generate(cTests, papszNameFmts);
2487 if (rcExit == RTEXITCODE_SUCCESS)
2488 rcExit = ShiftDblU64Generate(cTests, papszNameFmts);
2489 return rcExit;
2490}
2491
2492static RTEXITCODE ShiftDblDumpAll(const char * const * papszNameFmts)
2493{
2494 RTEXITCODE rcExit = ShiftDblU16DumpAll(papszNameFmts);
2495 if (rcExit == RTEXITCODE_SUCCESS)
2496 rcExit = ShiftDblU32DumpAll(papszNameFmts);
2497 if (rcExit == RTEXITCODE_SUCCESS)
2498 rcExit = ShiftDblU64DumpAll(papszNameFmts);
2499 return rcExit;
2500}
2501#endif
2502
2503static void ShiftDblTest(void)
2504{
2505 ShiftDblU16Test();
2506 ShiftDblU32Test();
2507 ShiftDblU64Test();
2508}
2509
2510
2511/*
2512 * Unary operators.
2513 *
2514 * Note! We use BINUxx_TEST_T ignoring uSrcIn and uMisc.
2515 */
2516#ifdef TSTIEMAIMPL_WITH_GENERATOR
2517# define GEN_UNARY(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType) \
2518static RTEXITCODE UnaryU ## a_cBits ## Generate(uint32_t cTests, const char * const * papszNameFmts) \
2519{ \
2520 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
2521 { \
2522 IEMBINARYOUTPUT BinOut; \
2523 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aUnaryU ## a_cBits[iFn]), RTEXITCODE_FAILURE); \
2524 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2525 { \
2526 a_TestType Test; \
2527 Test.fEflIn = RandEFlags(); \
2528 Test.fEflOut = Test.fEflIn; \
2529 Test.uDstIn = RandU ## a_cBits(); \
2530 Test.uDstOut = Test.uDstIn; \
2531 Test.uSrcIn = 0; \
2532 Test.uMisc = 0; \
2533 g_aUnaryU ## a_cBits[iFn].pfn(&Test.uDstOut, &Test.fEflOut); \
2534 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
2535 } \
2536 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
2537 } \
2538 return RTEXITCODE_SUCCESS; \
2539} \
2540DUMP_ALL_FN(UnaryU ## a_cBits, g_aUnaryU ## a_cBits)
2541#else
2542# define GEN_UNARY(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType)
2543#endif
2544
2545#define TEST_UNARY(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType) \
2546TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLUNARYU ## a_cBits); \
2547static a_SubTestType g_aUnaryU ## a_cBits [] = \
2548{ \
2549 ENTRY_BIN(inc_u ## a_cBits), \
2550 ENTRY_BIN(inc_u ## a_cBits ## _locked), \
2551 ENTRY_BIN(dec_u ## a_cBits), \
2552 ENTRY_BIN(dec_u ## a_cBits ## _locked), \
2553 ENTRY_BIN(not_u ## a_cBits), \
2554 ENTRY_BIN(not_u ## a_cBits ## _locked), \
2555 ENTRY_BIN(neg_u ## a_cBits), \
2556 ENTRY_BIN(neg_u ## a_cBits ## _locked), \
2557}; \
2558\
2559GEN_UNARY(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType) \
2560\
2561static void UnaryU ## a_cBits ## Test(void) \
2562{ \
2563 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aUnaryU ## a_cBits); iFn++) \
2564 { \
2565 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aUnaryU ## a_cBits[iFn])) \
2566 continue; \
2567 a_TestType const * const paTests = g_aUnaryU ## a_cBits[iFn].paTests; \
2568 uint32_t const cTests = g_aUnaryU ## a_cBits[iFn].cTests; \
2569 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
2570 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2571 { \
2572 uint32_t fEfl = paTests[iTest].fEflIn; \
2573 a_Type uDst = paTests[iTest].uDstIn; \
2574 g_aUnaryU ## a_cBits[iFn].pfn(&uDst, &fEfl); \
2575 if ( uDst != paTests[iTest].uDstOut \
2576 || fEfl != paTests[iTest].fEflOut) \
2577 RTTestFailed(g_hTest, "#%u: efl=%#08x dst=" a_Fmt " -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
2578 iTest, paTests[iTest].fEflIn, paTests[iTest].uDstIn, \
2579 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
2580 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
2581 else \
2582 { \
2583 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
2584 *g_pfEfl = paTests[iTest].fEflIn; \
2585 g_aUnaryU ## a_cBits[iFn].pfn(g_pu ## a_cBits, g_pfEfl); \
2586 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
2587 RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
2588 } \
2589 } \
2590 FREE_DECOMPRESSED_TESTS(g_aUnaryU ## a_cBits[iFn]); \
2591 } \
2592}
2593TEST_UNARY(8, uint8_t, "%#04RX8", BINU8_TEST_T, INT_UNARY_U8_T)
2594TEST_UNARY(16, uint16_t, "%#06RX16", BINU16_TEST_T, INT_UNARY_U16_T)
2595TEST_UNARY(32, uint32_t, "%#010RX32", BINU32_TEST_T, INT_UNARY_U32_T)
2596TEST_UNARY(64, uint64_t, "%#018RX64", BINU64_TEST_T, INT_UNARY_U64_T)
2597
2598#ifdef TSTIEMAIMPL_WITH_GENERATOR
2599static RTEXITCODE UnaryGenerate(uint32_t cTests, const char * const * papszNameFmts)
2600{
2601 RTEXITCODE rcExit = UnaryU8Generate(cTests, papszNameFmts);
2602 if (rcExit == RTEXITCODE_SUCCESS)
2603 rcExit = UnaryU16Generate(cTests, papszNameFmts);
2604 if (rcExit == RTEXITCODE_SUCCESS)
2605 rcExit = UnaryU32Generate(cTests, papszNameFmts);
2606 if (rcExit == RTEXITCODE_SUCCESS)
2607 rcExit = UnaryU64Generate(cTests, papszNameFmts);
2608 return rcExit;
2609}
2610
2611static RTEXITCODE UnaryDumpAll(const char * const * papszNameFmts)
2612{
2613 RTEXITCODE rcExit = UnaryU8DumpAll(papszNameFmts);
2614 if (rcExit == RTEXITCODE_SUCCESS)
2615 rcExit = UnaryU16DumpAll(papszNameFmts);
2616 if (rcExit == RTEXITCODE_SUCCESS)
2617 rcExit = UnaryU32DumpAll(papszNameFmts);
2618 if (rcExit == RTEXITCODE_SUCCESS)
2619 rcExit = UnaryU64DumpAll(papszNameFmts);
2620 return rcExit;
2621}
2622#endif
2623
2624static void UnaryTest(void)
2625{
2626 UnaryU8Test();
2627 UnaryU16Test();
2628 UnaryU32Test();
2629 UnaryU64Test();
2630}
2631
2632
2633/*
2634 * Shifts.
2635 *
2636 * Note! We use BINUxx_TEST_T with the shift count in uMisc and uSrcIn unused.
2637 */
2638#ifdef TSTIEMAIMPL_WITH_GENERATOR
2639# define GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
2640static RTEXITCODE ShiftU ## a_cBits ## Generate(uint32_t cTests, const char * const * papszNameFmts) \
2641{ \
2642 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
2643 { \
2644 if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
2645 && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
2646 continue; \
2647 IEMBINARYOUTPUT BinOut; \
2648 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
2649 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2650 { \
2651 a_TestType Test; \
2652 Test.fEflIn = RandEFlags(); \
2653 Test.fEflOut = Test.fEflIn; \
2654 Test.uDstIn = RandU ## a_cBits ## Dst(iTest); \
2655 Test.uDstOut = Test.uDstIn; \
2656 Test.uSrcIn = 0; \
2657 Test.uMisc = RandU8() & (a_cBits * 4 - 1); /* need to go way beyond the a_cBits limit */ \
2658 a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
2659 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
2660 \
2661 Test.fEflIn = (~Test.fEflIn & X86_EFL_LIVE_MASK) | X86_EFL_RA1_MASK; \
2662 Test.fEflOut = Test.fEflIn; \
2663 Test.uDstOut = Test.uDstIn; \
2664 a_aSubTests[iFn].pfnNative(&Test.uDstOut, Test.uMisc, &Test.fEflOut); \
2665 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
2666 } \
2667 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
2668 } \
2669 return RTEXITCODE_SUCCESS; \
2670} \
2671DUMP_ALL_FN(ShiftU ## a_cBits, a_aSubTests)
2672#else
2673# define GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests)
2674#endif
2675
2676#define TEST_SHIFT(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
2677TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLSHIFTU ## a_cBits); \
2678static a_SubTestType a_aSubTests[] = \
2679{ \
2680 ENTRY_BIN_AMD( rol_u ## a_cBits, X86_EFL_OF), \
2681 ENTRY_BIN_INTEL(rol_u ## a_cBits, X86_EFL_OF), \
2682 ENTRY_BIN_AMD( ror_u ## a_cBits, X86_EFL_OF), \
2683 ENTRY_BIN_INTEL(ror_u ## a_cBits, X86_EFL_OF), \
2684 ENTRY_BIN_AMD( rcl_u ## a_cBits, X86_EFL_OF), \
2685 ENTRY_BIN_INTEL(rcl_u ## a_cBits, X86_EFL_OF), \
2686 ENTRY_BIN_AMD( rcr_u ## a_cBits, X86_EFL_OF), \
2687 ENTRY_BIN_INTEL(rcr_u ## a_cBits, X86_EFL_OF), \
2688 ENTRY_BIN_AMD( shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
2689 ENTRY_BIN_INTEL(shl_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
2690 ENTRY_BIN_AMD( shr_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
2691 ENTRY_BIN_INTEL(shr_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
2692 ENTRY_BIN_AMD( sar_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
2693 ENTRY_BIN_INTEL(sar_u ## a_cBits, X86_EFL_OF | X86_EFL_AF), \
2694}; \
2695\
2696GEN_SHIFT(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
2697\
2698static void ShiftU ## a_cBits ## Test(void) \
2699{ \
2700 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
2701 { \
2702 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
2703 continue; \
2704 PFNIEMAIMPLSHIFTU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
2705 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
2706 uint32_t const cTests = a_aSubTests[iFn].cTests; \
2707 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
2708 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
2709 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
2710 { \
2711 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2712 { \
2713 uint32_t fEfl = paTests[iTest].fEflIn; \
2714 a_Type uDst = paTests[iTest].uDstIn; \
2715 pfn(&uDst, paTests[iTest].uMisc, &fEfl); \
2716 if ( uDst != paTests[iTest].uDstOut \
2717 || fEfl != paTests[iTest].fEflOut ) \
2718 RTTestFailed(g_hTest, "#%u%s: efl=%#08x dst=" a_Fmt " shift=%2u -> efl=%#08x dst=" a_Fmt ", expected %#08x & " a_Fmt "%s\n", \
2719 iTest, iVar == 0 ? "" : "/n", \
2720 paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uMisc, \
2721 fEfl, uDst, paTests[iTest].fEflOut, paTests[iTest].uDstOut, \
2722 EFlagsDiff(fEfl, paTests[iTest].fEflOut)); \
2723 else \
2724 { \
2725 *g_pu ## a_cBits = paTests[iTest].uDstIn; \
2726 *g_pfEfl = paTests[iTest].fEflIn; \
2727 pfn(g_pu ## a_cBits, paTests[iTest].uMisc, g_pfEfl); \
2728 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDstOut); \
2729 RTTEST_CHECK(g_hTest, *g_pfEfl == paTests[iTest].fEflOut); \
2730 } \
2731 } \
2732 pfn = a_aSubTests[iFn].pfnNative; \
2733 } \
2734 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
2735 } \
2736}
2737TEST_SHIFT(8, uint8_t, "%#04RX8", BINU8_TEST_T, INT_BINARY_U8_T, g_aShiftU8)
2738TEST_SHIFT(16, uint16_t, "%#06RX16", BINU16_TEST_T, INT_BINARY_U16_T, g_aShiftU16)
2739TEST_SHIFT(32, uint32_t, "%#010RX32", BINU32_TEST_T, INT_BINARY_U32_T, g_aShiftU32)
2740TEST_SHIFT(64, uint64_t, "%#018RX64", BINU64_TEST_T, INT_BINARY_U64_T, g_aShiftU64)
2741
2742#ifdef TSTIEMAIMPL_WITH_GENERATOR
2743static RTEXITCODE ShiftGenerate(uint32_t cTests, const char * const * papszNameFmts)
2744{
2745 RTEXITCODE rcExit = ShiftU8Generate(cTests, papszNameFmts);
2746 if (rcExit == RTEXITCODE_SUCCESS)
2747 rcExit = ShiftU16Generate(cTests, papszNameFmts);
2748 if (rcExit == RTEXITCODE_SUCCESS)
2749 rcExit = ShiftU32Generate(cTests, papszNameFmts);
2750 if (rcExit == RTEXITCODE_SUCCESS)
2751 rcExit = ShiftU64Generate(cTests, papszNameFmts);
2752 return rcExit;
2753}
2754
2755static RTEXITCODE ShiftDumpAll(const char * const * papszNameFmts)
2756{
2757 RTEXITCODE rcExit = ShiftU8DumpAll(papszNameFmts);
2758 if (rcExit == RTEXITCODE_SUCCESS)
2759 rcExit = ShiftU16DumpAll(papszNameFmts);
2760 if (rcExit == RTEXITCODE_SUCCESS)
2761 rcExit = ShiftU32DumpAll(papszNameFmts);
2762 if (rcExit == RTEXITCODE_SUCCESS)
2763 rcExit = ShiftU64DumpAll(papszNameFmts);
2764 return rcExit;
2765}
2766#endif
2767
2768static void ShiftTest(void)
2769{
2770 ShiftU8Test();
2771 ShiftU16Test();
2772 ShiftU32Test();
2773 ShiftU64Test();
2774}
2775
2776
2777/*
2778 * Multiplication and division.
2779 *
2780 * Note! The 8-bit functions have a different format, so we need to duplicate things.
2781 * Note! Currently ignoring undefined bits.
2782 */
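/* For reference (shapes inferred from the calls in this file, parameter names are illustrative): the
   8-bit workers take a single 16-bit AX-style accumulator, roughly
   int (*)(uint16_t *puAX, uint8_t uSrcOrDivisor, uint32_t *pfEFlags),
   whereas the 16/32/64-bit workers take the two destination halves separately (AX/DX style), roughly
   int (*)(a_uType *puDstLo, a_uType *puDstHi, a_uType uSrcOrDivisor, uint32_t *pfEFlags),
   with a return value of 0 on success and -1 when the division would raise #DE. Hence the dedicated
   8-bit code path below. */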
2783
2784/* U8 */
2785#ifdef TSTIEMAIMPL_WITH_GENERATOR
2786static const MULDIVU8_TEST_T g_aFixedTests_idiv_u8[] =
2787{
2788 /* efl in, efl out, uDstIn, uDstOut, uSrcIn, rc (0 or -1 for actual; -128 for auto) */
2789 { UINT32_MAX, 0, 0x8000, 0, 0xc7, -1 }, /* -32768 / -57 = #DE (574.8771929824...) */
2790 { UINT32_MAX, 0, 0x8000, 0, 0xdd, -128 }, /* -32768 / -35 = #DE (936.2285714285...) */
2791 { UINT32_MAX, 0, 0x7f00, 0, 0x7f, -1 }, /* 0x7f00 / 0x7f = #DE (0x100) */
2792 { UINT32_MAX, 0, 0x3f80, 0, 0x7f, -1 }, /* 0x3F80 / 0x7f = #DE (0x80) */
2793 { UINT32_MAX, 0, 0x3f7f, 0, 0x7f, 0 }, /* 0x3F7F / 0x7f = 127.992125984... */
2794 { UINT32_MAX, 0, 0xc000, 0, 0x80, -1 }, /* -16384 / -128 = #DE (0x80) */
2795 { UINT32_MAX, 0, 0xc001, 0, 0x80, 0 }, /* -16383 / -128 = 127.9921875 */
2796};
2797#endif
2798TYPEDEF_SUBTEST_TYPE(INT_MULDIV_U8_T, MULDIVU8_TEST_T, PFNIEMAIMPLMULDIVU8);
2799static INT_MULDIV_U8_T g_aMulDivU8[] =
2800{
2801 ENTRY_BIN_AMD_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
2802 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
2803 ENTRY_BIN_INTEL_EX(mul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
2804 ENTRY_BIN_AMD_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF,
2805 X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF),
2806 ENTRY_BIN_INTEL_EX(imul_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0),
2807 ENTRY_BIN_AMD_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
2808 ENTRY_BIN_INTEL_EX(div_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
2809 ENTRY_BIN_FIX_AMD_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
2810 ENTRY_BIN_FIX_INTEL_EX(idiv_u8, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0),
2811};
2812
2813#ifdef TSTIEMAIMPL_WITH_GENERATOR
2814DUMP_ALL_FN(MulDivU8, g_aMulDivU8)
2815static RTEXITCODE MulDivU8Generate(uint32_t cTests, const char * const * papszNameFmts)
2816{
2817 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
2818 {
2819 if ( g_aMulDivU8[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
2820 && g_aMulDivU8[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
2821 continue;
2822        IEMBINARYOUTPUT BinOut;
2823        AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aMulDivU8[iFn]), RTEXITCODE_FAILURE);
2824 for (uint32_t iTest = 0; iTest < cTests; iTest++ )
2825 {
2826 MULDIVU8_TEST_T Test;
2827 Test.fEflIn = RandEFlags();
2828 Test.fEflOut = Test.fEflIn;
2829 Test.uDstIn = RandU16Dst(iTest);
2830 Test.uDstOut = Test.uDstIn;
2831 Test.uSrcIn = RandU8Src(iTest);
2832 Test.rc = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
2833 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
2834 }
2835 for (uint32_t iTest = 0; iTest < g_aMulDivU8[iFn].cFixedTests; iTest++)
2836 {
2837 MULDIVU8_TEST_T Test;
2838 Test.fEflIn = g_aMulDivU8[iFn].paFixedTests[iTest].fEflIn == UINT32_MAX ? RandEFlags()
2839 : g_aMulDivU8[iFn].paFixedTests[iTest].fEflIn;
2840 Test.fEflOut = Test.fEflIn;
2841 Test.uDstIn = g_aMulDivU8[iFn].paFixedTests[iTest].uDstIn;
2842 Test.uDstOut = Test.uDstIn;
2843 Test.uSrcIn = g_aMulDivU8[iFn].paFixedTests[iTest].uSrcIn;
2844 Test.rc = g_aMulDivU8[iFn].pfnNative(&Test.uDstOut, Test.uSrcIn, &Test.fEflOut);
2845 if (g_aMulDivU8[iFn].paFixedTests[iTest].rc == 0 || g_aMulDivU8[iFn].paFixedTests[iTest].rc == -1)
2846 Test.rc = g_aMulDivU8[iFn].paFixedTests[iTest].rc;
2847 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
2848 }
2849 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
2850 }
2851 return RTEXITCODE_SUCCESS;
2852}
2853#endif
2854
2855static void MulDivU8Test(void)
2856{
2857 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aMulDivU8); iFn++)
2858 {
2859        if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aMulDivU8[iFn]))
2860            continue;
2861 MULDIVU8_TEST_T const * const paTests = g_aMulDivU8[iFn].paTests;
2862 uint32_t const cTests = g_aMulDivU8[iFn].cTests;
2863 uint32_t const fEflIgn = g_aMulDivU8[iFn].uExtra;
2864 PFNIEMAIMPLMULDIVU8 pfn = g_aMulDivU8[iFn].pfn;
2865        uint32_t const cVars = COUNT_VARIATIONS(g_aMulDivU8[iFn]);
2866 if (!cTests) RTTestSkipped(g_hTest, "no tests");
2867 for (uint32_t iVar = 0; iVar < cVars; iVar++)
2868 {
2869 for (uint32_t iTest = 0; iTest < cTests; iTest++ )
2870 {
2871 uint32_t fEfl = paTests[iTest].fEflIn;
2872 uint16_t uDst = paTests[iTest].uDstIn;
2873                int rc = pfn(&uDst, paTests[iTest].uSrcIn, &fEfl);
2874 if ( uDst != paTests[iTest].uDstOut
2875 || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)
2876 || rc != paTests[iTest].rc)
2877 RTTestFailed(g_hTest, "#%02u%s: efl=%#08x dst=%#06RX16 src=%#04RX8\n"
2878 " %s-> efl=%#08x dst=%#06RX16 rc=%d\n"
2879 "%sexpected %#08x %#06RX16 %d%s\n",
2880 iTest, iVar ? "/n" : "", paTests[iTest].fEflIn, paTests[iTest].uDstIn, paTests[iTest].uSrcIn,
2881 iVar ? " " : "", fEfl, uDst, rc,
2882 iVar ? " " : "", paTests[iTest].fEflOut, paTests[iTest].uDstOut, paTests[iTest].rc,
2883 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn));
2884 else
2885 {
2886 *g_pu16 = paTests[iTest].uDstIn;
2887 *g_pfEfl = paTests[iTest].fEflIn;
2888                    rc = pfn(g_pu16, paTests[iTest].uSrcIn, g_pfEfl);
2889 RTTEST_CHECK(g_hTest, *g_pu16 == paTests[iTest].uDstOut);
2890 RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn));
2891 RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc);
2892 }
2893 }
2894 pfn = g_aMulDivU8[iFn].pfnNative;
2895 }
2896        FREE_DECOMPRESSED_TESTS(g_aMulDivU8[iFn]);
2897 }
2898}
2899
2900#ifdef TSTIEMAIMPL_WITH_GENERATOR
2901static const MULDIVU16_TEST_T g_aFixedTests_idiv_u16[] =
2902{
2903 /* low high */
2904 /* --- eflags ---, -- uDst1 --, -- uDst2 --, */
2905 /* in, out, in , out, in , out, uSrcIn, rc (0 or -1 for actual; -128 for auto) */
2906 { UINT32_MAX, 0, 0x0000, 0, 0x8000, 0, 0xc004, -1 }, /* -2147483648 /-16380 = #DE (131104.00781...) */
2907 { UINT32_MAX, 0, 0xffff, 0, 0x7fff, 0, 0x7fff, -1 }, /* 2147483647 / 32767 = #DE (65538.000030...) */
2908 { UINT32_MAX, 0, 0x8000, 0, 0x3fff, 0, 0x7fff, -1 }, /* 0x3fff8000 / 0x7fff = #DE (0x8000) */
2909 { UINT32_MAX, 0, 0x7fff, 0, 0x3fff, 0, 0x7fff, 0 }, /* 0x3fff7fff / 0x7fff = 32767.99996948... */
2910 { UINT32_MAX, 0, 0x0000, 0, 0xc000, 0, 0x8000, -1 }, /* -1073741824 / -32768 = #DE (0x8000) */
2911 { UINT32_MAX, 0, 0x0001, 0, 0xc000, 0, 0x8000, 0 }, /* -1073741823 / -32768 = 32767.999969482421875 */
2912};
2913
2914static const MULDIVU32_TEST_T g_aFixedTests_idiv_u32[] =
2915{
2916 /* low high */
2917 /* --- eflags ---, ---- uDst1 ----, ---- uDst2 ----, */
2918 /* in, out, in , out, in , out, uSrcIn, rc (0 or -1 for actual; -128 for auto) */
2919 { UINT32_MAX, 0, 0x00000000, 0, 0x80000000, 0, 0xc0000004, -1 },
2920 { UINT32_MAX, 0, 0xffffffff, 0, 0x7fffffff, 0, 0x7fffffff, -1 },
2921 { UINT32_MAX, 0, 0x80000000, 0, 0x3fffffff, 0, 0x7fffffff, -1 },
2922 { UINT32_MAX, 0, 0x7fffffff, 0, 0x3fffffff, 0, 0x7fffffff, 0 },
2923 { UINT32_MAX, 0, 0x00000000, 0, 0xc0000000, 0, 0x80000000, -1 },
2924 { UINT32_MAX, 0, 0x00000001, 0, 0xc0000000, 0, 0x80000000, 0 },
2925};
2926
2927static const MULDIVU64_TEST_T g_aFixedTests_idiv_u64[] =
2928{
2929 /* low high */
2930 /* --- eflags ---, -------- uDst1 --------, -------- uDst2 --------, */
2931 /* in, out, in , out, in , out, uSrcIn, rc (0 or -1 for actual; -128 for auto) */
2932 { UINT32_MAX, 0, 0x0000000000000000, 0, 0x8000000000000000, 0, 0xc000000000000004, -1 },
2933 { UINT32_MAX, 0, 0xffffffffffffffff, 0, 0x7fffffffffffffff, 0, 0x7fffffffffffffff, -1 },
2934 { UINT32_MAX, 0, 0x8000000000000000, 0, 0x3fffffffffffffff, 0, 0x7fffffffffffffff, -1 },
2935 { UINT32_MAX, 0, 0x7fffffffffffffff, 0, 0x3fffffffffffffff, 0, 0x7fffffffffffffff, 0 },
2936 { UINT32_MAX, 0, 0x0000000000000000, 0, 0xc000000000000000, 0, 0x8000000000000000, -1 },
2937 { UINT32_MAX, 0, 0x0000000000000001, 0, 0xc000000000000000, 0, 0x8000000000000000, 0 },
2938};
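/* The u32/u64 tables mirror the commented 16-bit boundary cases scaled up. Working through the last
   two 64-bit entries: the dividend 0xc000000000000000'0000000000000000 is -2^126; divided by
   0x8000000000000000 (-2^63) the quotient is exactly 2^63, one above INT64_MAX, so the divide
   faults (#DE, rc=-1). Bumping the low half to 1 shrinks the magnitude just enough for the quotient
   to truncate to INT64_MAX, so that case succeeds (rc=0). */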
2939
2940# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
2941DUMP_ALL_FN(MulDivU ## a_cBits, a_aSubTests) \
2942static RTEXITCODE MulDivU ## a_cBits ## Generate(uint32_t cTests, const char * const * papszNameFmts) \
2943{ \
2944 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
2945 { \
2946 if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
2947 && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
2948 continue; \
2949 IEMBINARYOUTPUT BinOut; \
2950 a_TestType Test; \
2951 RT_ZERO(Test); /* 64-bit variant contains alignment padding */ \
2952 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
2953 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
2954 { \
2955 Test.fEflIn = RandEFlags(); \
2956 Test.fEflOut = Test.fEflIn; \
2957 Test.uDst1In = RandU ## a_cBits ## Dst(iTest); \
2958 Test.uDst1Out = Test.uDst1In; \
2959 Test.uDst2In = RandU ## a_cBits ## Dst(iTest); \
2960 Test.uDst2Out = Test.uDst2In; \
2961 Test.uSrcIn = RandU ## a_cBits ## Src(iTest); \
2962 Test.rc = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
2963 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
2964 } \
2965 for (uint32_t iTest = 0; iTest < a_aSubTests[iFn].cFixedTests; iTest++ ) \
2966 { \
2967 Test.fEflIn = a_aSubTests[iFn].paFixedTests[iTest].fEflIn == UINT32_MAX ? RandEFlags() \
2968 : a_aSubTests[iFn].paFixedTests[iTest].fEflIn; \
2969 Test.fEflOut = Test.fEflIn; \
2970 Test.uDst1In = a_aSubTests[iFn].paFixedTests[iTest].uDst1In; \
2971 Test.uDst1Out = Test.uDst1In; \
2972 Test.uDst2In = a_aSubTests[iFn].paFixedTests[iTest].uDst2In; \
2973 Test.uDst2Out = Test.uDst2In; \
2974 Test.uSrcIn = a_aSubTests[iFn].paFixedTests[iTest].uSrcIn; \
2975 Test.rc = a_aSubTests[iFn].pfnNative(&Test.uDst1Out, &Test.uDst2Out, Test.uSrcIn, &Test.fEflOut); \
2976 if (a_aSubTests[iFn].paFixedTests[iTest].rc == 0 || a_aSubTests[iFn].paFixedTests[iTest].rc == -1) \
2977 Test.rc = a_aSubTests[iFn].paFixedTests[iTest].rc; \
2978 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
2979 } \
2980 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
2981 } \
2982 return RTEXITCODE_SUCCESS; \
2983}
2984#else
2985# define GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests)
2986#endif
2987
2988#define TEST_MULDIV(a_cBits, a_Type, a_Fmt, a_TestType, a_SubTestType, a_aSubTests) \
2989TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLMULDIVU ## a_cBits); \
2990static a_SubTestType a_aSubTests [] = \
2991{ \
2992 ENTRY_BIN_AMD_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
2993 ENTRY_BIN_INTEL_EX(mul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
2994 ENTRY_BIN_AMD_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
2995 ENTRY_BIN_INTEL_EX(imul_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF, 0), \
2996 ENTRY_BIN_AMD_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
2997 ENTRY_BIN_INTEL_EX(div_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
2998 ENTRY_BIN_FIX_AMD_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
2999 ENTRY_BIN_FIX_INTEL_EX(idiv_u ## a_cBits, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF | X86_EFL_OF, 0), \
3000}; \
3001\
3002GEN_MULDIV(a_cBits, a_Fmt, a_TestType, a_aSubTests) \
3003\
3004static void MulDivU ## a_cBits ## Test(void) \
3005{ \
3006 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3007 { \
3008 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
3009 continue; \
3010 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
3011 uint32_t const cTests = a_aSubTests[iFn].cTests; \
3012 uint32_t const fEflIgn = a_aSubTests[iFn].uExtra; \
3013 PFNIEMAIMPLMULDIVU ## a_cBits pfn = a_aSubTests[iFn].pfn; \
3014 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
3015 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
3016 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
3017 { \
3018 for (uint32_t iTest = 0; iTest < cTests; iTest++ ) \
3019 { \
3020 uint32_t fEfl = paTests[iTest].fEflIn; \
3021 a_Type uDst1 = paTests[iTest].uDst1In; \
3022 a_Type uDst2 = paTests[iTest].uDst2In; \
3023 int rc = pfn(&uDst1, &uDst2, paTests[iTest].uSrcIn, &fEfl); \
3024 if ( uDst1 != paTests[iTest].uDst1Out \
3025 || uDst2 != paTests[iTest].uDst2Out \
3026 || (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn)\
3027 || rc != paTests[iTest].rc) \
3028 RTTestFailed(g_hTest, "#%04u%s: efl=%#010x dst1=" a_Fmt " dst2=" a_Fmt " src=" a_Fmt "\n" \
3029 " -> efl=%#010x dst1=" a_Fmt " dst2=" a_Fmt " rc=%d\n" \
3030 " expected %#010x " a_Fmt " " a_Fmt " %d%s -%s%s%s\n", \
3031 iTest, iVar == 0 ? " " : "/n", \
3032 paTests[iTest].fEflIn, paTests[iTest].uDst1In, paTests[iTest].uDst2In, paTests[iTest].uSrcIn, \
3033 fEfl, uDst1, uDst2, rc, \
3034 paTests[iTest].fEflOut, paTests[iTest].uDst1Out, paTests[iTest].uDst2Out, paTests[iTest].rc, \
3035 EFlagsDiff(fEfl | fEflIgn, paTests[iTest].fEflOut | fEflIgn), \
3036 uDst1 != paTests[iTest].uDst1Out ? " dst1" : "", uDst2 != paTests[iTest].uDst2Out ? " dst2" : "", \
3037 (fEfl | fEflIgn) != (paTests[iTest].fEflOut | fEflIgn) ? " eflags" : ""); \
3038 else \
3039 { \
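                /* Re-run the same inputs through the global helper buffers and re-check the outputs. */ \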
3040 *g_pu ## a_cBits = paTests[iTest].uDst1In; \
3041 *g_pu ## a_cBits ## Two = paTests[iTest].uDst2In; \
3042 *g_pfEfl = paTests[iTest].fEflIn; \
3043 rc = pfn(g_pu ## a_cBits, g_pu ## a_cBits ## Two, paTests[iTest].uSrcIn, g_pfEfl); \
3044 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits == paTests[iTest].uDst1Out); \
3045 RTTEST_CHECK(g_hTest, *g_pu ## a_cBits ## Two == paTests[iTest].uDst2Out); \
3046 RTTEST_CHECK(g_hTest, (*g_pfEfl | fEflIgn) == (paTests[iTest].fEflOut | fEflIgn)); \
3047 RTTEST_CHECK(g_hTest, rc == paTests[iTest].rc); \
3048 } \
3049 } \
3050 pfn = a_aSubTests[iFn].pfnNative; \
3051 } \
3052 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
3053 } \
3054}
3055TEST_MULDIV(16, uint16_t, "%#06RX16", MULDIVU16_TEST_T, INT_MULDIV_U16_T, g_aMulDivU16)
3056TEST_MULDIV(32, uint32_t, "%#010RX32", MULDIVU32_TEST_T, INT_MULDIV_U32_T, g_aMulDivU32)
3057TEST_MULDIV(64, uint64_t, "%#018RX64", MULDIVU64_TEST_T, INT_MULDIV_U64_T, g_aMulDivU64)
3058
3059#ifdef TSTIEMAIMPL_WITH_GENERATOR
3060static RTEXITCODE MulDivGenerate(uint32_t cTests, const char * const * papszNameFmts)
3061{
3062 RTEXITCODE rcExit = MulDivU8Generate(cTests, papszNameFmts);
3063 if (rcExit == RTEXITCODE_SUCCESS)
3064 rcExit = MulDivU16Generate(cTests, papszNameFmts);
3065 if (rcExit == RTEXITCODE_SUCCESS)
3066 rcExit = MulDivU32Generate(cTests, papszNameFmts);
3067 if (rcExit == RTEXITCODE_SUCCESS)
3068 rcExit = MulDivU64Generate(cTests, papszNameFmts);
3069 return rcExit;
3070}
3071
3072static RTEXITCODE MulDivDumpAll(const char * const * papszNameFmts)
3073{
3074 RTEXITCODE rcExit = MulDivU8DumpAll(papszNameFmts);
3075 if (rcExit == RTEXITCODE_SUCCESS)
3076 rcExit = MulDivU16DumpAll(papszNameFmts);
3077 if (rcExit == RTEXITCODE_SUCCESS)
3078 rcExit = MulDivU32DumpAll(papszNameFmts);
3079 if (rcExit == RTEXITCODE_SUCCESS)
3080 rcExit = MulDivU64DumpAll(papszNameFmts);
3081 return rcExit;
3082}
3083#endif
3084
3085static void MulDivTest(void)
3086{
3087 MulDivU8Test();
3088 MulDivU16Test();
3089 MulDivU32Test();
3090 MulDivU64Test();
3091}
3092
3093
3094/*
3095 * BSWAP
3096 */
3097static void BswapTest(void)
3098{
3099 if (SubTestAndCheckIfEnabled("bswap_u16"))
3100 {
3101 *g_pu32 = UINT32_C(0x12345678);
3102 iemAImpl_bswap_u16(g_pu32);
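        /* A 16-bit BSWAP has undefined results; the enabled checks expect the low word to be zeroed,
           while the #if 0 alternative would correspond to a plain in-place byte swap. */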
3103#if 0
3104 RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12347856), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
3105#else
3106 RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0x12340000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
3107#endif
3108 *g_pu32 = UINT32_C(0xffff1122);
3109 iemAImpl_bswap_u16(g_pu32);
3110#if 0
3111 RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff2211), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
3112#else
3113 RTTEST_CHECK_MSG(g_hTest, *g_pu32 == UINT32_C(0xffff0000), (g_hTest, "*g_pu32=%#RX32\n", *g_pu32));
3114#endif
3115 }
3116
3117 if (SubTestAndCheckIfEnabled("bswap_u32"))
3118 {
3119 *g_pu32 = UINT32_C(0x12345678);
3120 iemAImpl_bswap_u32(g_pu32);
3121 RTTEST_CHECK(g_hTest, *g_pu32 == UINT32_C(0x78563412));
3122 }
3123
3124 if (SubTestAndCheckIfEnabled("bswap_u64"))
3125 {
3126 *g_pu64 = UINT64_C(0x0123456789abcdef);
3127 iemAImpl_bswap_u64(g_pu64);
3128 RTTEST_CHECK(g_hTest, *g_pu64 == UINT64_C(0xefcdab8967452301));
3129 }
3130}
3131
3132
3133
3134/*********************************************************************************************************************************
3135* Floating point (x87 style) *
3136*********************************************************************************************************************************/
3137
3138/*
3139 * FPU constant loading.
3140 */
3141TYPEDEF_SUBTEST_TYPE(FPU_LD_CONST_T, FPU_LD_CONST_TEST_T, PFNIEMAIMPLFPUR80LDCONST);
3142
3143static FPU_LD_CONST_T g_aFpuLdConst[] =
3144{
3145 ENTRY_BIN(fld1),
3146 ENTRY_BIN(fldl2t),
3147 ENTRY_BIN(fldl2e),
3148 ENTRY_BIN(fldpi),
3149 ENTRY_BIN(fldlg2),
3150 ENTRY_BIN(fldln2),
3151 ENTRY_BIN(fldz),
3152};
3153
3154#ifdef TSTIEMAIMPL_WITH_GENERATOR
3155static RTEXITCODE FpuLdConstGenerate(uint32_t cTests, const char * const *papszNameFmts)
3156{
3157 X86FXSTATE State;
3158 RT_ZERO(State);
3159 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuLdConst); iFn++)
3160 {
3161 IEMBINARYOUTPUT BinOut;
3162 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuLdConst[iFn]), RTEXITCODE_FAILURE);
3163 for (uint32_t iTest = 0; iTest < cTests; iTest += 4)
3164 {
3165 State.FCW = RandFcw();
3166 State.FSW = RandFsw();
3167
3168 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
3169 {
3170 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
3171 State.FCW = (State.FCW & ~X86_FCW_RC_MASK) | (iRounding << X86_FCW_RC_SHIFT);
3172 g_aFpuLdConst[iFn].pfn(&State, &Res);
3173 FPU_LD_CONST_TEST_T const Test = { State.FCW, State.FSW, Res.FSW, Res.r80Result };
3174 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
3175 }
3176 }
3177 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
3178 }
3179 return RTEXITCODE_SUCCESS;
3180}
3181DUMP_ALL_FN(FpuLdConst, g_aFpuLdConst)
3182#endif
3183
3184static void FpuLdConstTest(void)
3185{
3186 /*
3187 * Inputs:
3188 * - FSW: C0, C1, C2, C3
3189 * - FCW: Exception masks, Precision control, Rounding control.
3190 *
3191 * C1 set to 1 on stack overflow, zero otherwise. C0, C2, and C3 are "undefined".
3192 */
3193 X86FXSTATE State;
3194 RT_ZERO(State);
3195 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuLdConst); iFn++)
3196 {
3197 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuLdConst[iFn]))
3198 continue;
3199
3200 FPU_LD_CONST_TEST_T const *paTests = g_aFpuLdConst[iFn].paTests;
3201 uint32_t const cTests = g_aFpuLdConst[iFn].cTests;
3202 PFNIEMAIMPLFPUR80LDCONST pfn = g_aFpuLdConst[iFn].pfn;
3203 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuLdConst[iFn]);
3204 if (!cTests) RTTestSkipped(g_hTest, "no tests");
3205 for (uint32_t iVar = 0; iVar < cVars; iVar++)
3206 {
3207 for (uint32_t iTest = 0; iTest < cTests; iTest++)
3208 {
3209 State.FCW = paTests[iTest].fFcw;
3210 State.FSW = paTests[iTest].fFswIn;
3211 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
3212 pfn(&State, &Res);
3213 if ( Res.FSW != paTests[iTest].fFswOut
3214 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult))
3215 RTTestFailed(g_hTest, "#%u%s: fcw=%#06x fsw=%#06x -> fsw=%#06x %s, expected %#06x %s%s%s (%s)\n",
3216 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
3217 Res.FSW, FormatR80(&Res.r80Result),
3218 paTests[iTest].fFswOut, FormatR80(&paTests[iTest].rdResult),
3219 FswDiff(Res.FSW, paTests[iTest].fFswOut),
3220 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult) ? " - val" : "",
3221 FormatFcw(paTests[iTest].fFcw) );
3222 }
3223 pfn = g_aFpuLdConst[iFn].pfnNative;
3224 }
3225
3226 FREE_DECOMPRESSED_TESTS(g_aFpuLdConst[iFn]);
3227 }
3228}
3229
3230
3231/*
3232 * Load floating point values from memory.
3233 */
3234#ifdef TSTIEMAIMPL_WITH_GENERATOR
3235# define GEN_FPU_LOAD(a_cBits, a_rdTypeIn, a_aSubTests, a_TestType) \
3236static RTEXITCODE FpuLdR ## a_cBits ## Generate(uint32_t cTests, const char * const *papszNameFmts) \
3237{ \
3238 X86FXSTATE State; \
3239 RT_ZERO(State); \
3240 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3241 { \
3242 IEMBINARYOUTPUT BinOut; \
3243 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
3244 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
3245 { \
3246 State.FCW = RandFcw(); \
3247 State.FSW = RandFsw(); \
3248 a_rdTypeIn InVal = RandR ## a_cBits ## Src(iTest); \
3249 \
3250 for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
3251 { \
3252 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
3253 State.FCW = (State.FCW & ~X86_FCW_RC_MASK) | (iRounding << X86_FCW_RC_SHIFT); \
3254 a_aSubTests[iFn].pfn(&State, &Res, &InVal); \
3255 a_TestType const Test = { State.FCW, State.FSW, Res.FSW, Res.r80Result, InVal }; \
3256 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
3257 } \
3258 } \
3259 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
3260 } \
3261 return RTEXITCODE_SUCCESS; \
3262} \
3263DUMP_ALL_FN(FpuLdR ## a_cBits, a_aSubTests)
3264#else
3265# define GEN_FPU_LOAD(a_cBits, a_rdTypeIn, a_aSubTests, a_TestType)
3266#endif
3267
3268#define TEST_FPU_LOAD(a_cBits, a_rdTypeIn, a_SubTestType, a_aSubTests, a_TestType) \
3269typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPULDR80FROM ## a_cBits,(PCX86FXSTATE, PIEMFPURESULT, PC ## a_rdTypeIn)); \
3270typedef FNIEMAIMPLFPULDR80FROM ## a_cBits *PFNIEMAIMPLFPULDR80FROM ## a_cBits; \
3271TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLFPULDR80FROM ## a_cBits); \
3272\
3273static a_SubTestType a_aSubTests[] = \
3274{ \
3275 ENTRY_BIN(RT_CONCAT(fld_r80_from_r,a_cBits)) \
3276}; \
3277GEN_FPU_LOAD(a_cBits, a_rdTypeIn, a_aSubTests, a_TestType) \
3278\
3279static void FpuLdR ## a_cBits ## Test(void) \
3280{ \
3281 X86FXSTATE State; \
3282 RT_ZERO(State); \
3283 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3284 { \
3285 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
3286 continue; \
3287 \
3288 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
3289 uint32_t const cTests = a_aSubTests[iFn].cTests; \
3290 PFNIEMAIMPLFPULDR80FROM ## a_cBits pfn = a_aSubTests[iFn].pfn; \
3291 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
3292 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
3293 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
3294 { \
3295 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
3296 { \
3297 a_rdTypeIn const InVal = paTests[iTest].InVal; \
3298 State.FCW = paTests[iTest].fFcw; \
3299 State.FSW = paTests[iTest].fFswIn; \
3300 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
3301 pfn(&State, &Res, &InVal); \
3302 if ( Res.FSW != paTests[iTest].fFswOut \
3303 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult)) \
3304 RTTestFailed(g_hTest, "#%03u%s: fcw=%#06x fsw=%#06x in=%s\n" \
3305 "%s -> fsw=%#06x %s\n" \
3306 "%s expected %#06x %s%s%s (%s)\n", \
3307 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
3308 FormatR ## a_cBits(&paTests[iTest].InVal), \
3309 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result), \
3310 iVar ? " " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].rdResult), \
3311 FswDiff(Res.FSW, paTests[iTest].fFswOut), \
3312 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult) ? " - val" : "", \
3313 FormatFcw(paTests[iTest].fFcw) ); \
3314 } \
3315 pfn = a_aSubTests[iFn].pfnNative; \
3316 } \
3317 \
3318 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
3319 } \
3320}
3321
3322TEST_FPU_LOAD(80, RTFLOAT80U, FPU_LD_R80_T, g_aFpuLdR80, FPU_R80_IN_TEST_T)
3323TEST_FPU_LOAD(64, RTFLOAT64U, FPU_LD_R64_T, g_aFpuLdR64, FPU_R64_IN_TEST_T)
3324TEST_FPU_LOAD(32, RTFLOAT32U, FPU_LD_R32_T, g_aFpuLdR32, FPU_R32_IN_TEST_T)
3325
3326#ifdef TSTIEMAIMPL_WITH_GENERATOR
3327static RTEXITCODE FpuLdMemGenerate(uint32_t cTests, const char * const *papszNameFmts)
3328{
3329 RTEXITCODE rcExit = FpuLdR80Generate(cTests, papszNameFmts);
3330 if (rcExit == RTEXITCODE_SUCCESS)
3331 rcExit = FpuLdR64Generate(cTests, papszNameFmts);
3332 if (rcExit == RTEXITCODE_SUCCESS)
3333 rcExit = FpuLdR32Generate(cTests, papszNameFmts);
3334 return rcExit;
3335}
3336
3337static RTEXITCODE FpuLdMemDumpAll(const char * const *papszNameFmts)
3338{
3339 RTEXITCODE rcExit = FpuLdR80DumpAll(papszNameFmts);
3340 if (rcExit == RTEXITCODE_SUCCESS)
3341 rcExit = FpuLdR64DumpAll(papszNameFmts);
3342 if (rcExit == RTEXITCODE_SUCCESS)
3343 rcExit = FpuLdR32DumpAll(papszNameFmts);
3344 return rcExit;
3345}
3346#endif
3347
3348static void FpuLdMemTest(void)
3349{
3350 FpuLdR80Test();
3351 FpuLdR64Test();
3352 FpuLdR32Test();
3353}
3354
3355
3356/*
3357 * Load integer values from memory.
3358 */
3359#ifdef TSTIEMAIMPL_WITH_GENERATOR
3360# define GEN_FPU_LOAD_INT(a_cBits, a_iTypeIn, a_szFmtIn, a_aSubTests, a_TestType) \
3361static RTEXITCODE FpuLdI ## a_cBits ## Generate(uint32_t cTests, const char * const *papszNameFmts) \
3362{ \
3363 X86FXSTATE State; \
3364 RT_ZERO(State); \
3365 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3366 { \
3367 IEMBINARYOUTPUT BinOut; \
3368 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
3369 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
3370 { \
3371 State.FCW = RandFcw(); \
3372 State.FSW = RandFsw(); \
3373 a_iTypeIn InVal = (a_iTypeIn)RandU ## a_cBits ## Src(iTest); \
3374 \
3375 for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
3376 { \
3377 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
3378 State.FCW = (State.FCW & ~X86_FCW_RC_MASK) | (iRounding << X86_FCW_RC_SHIFT); \
3379 a_aSubTests[iFn].pfn(&State, &Res, &InVal); \
3380 a_TestType const Test = { State.FCW, State.FSW, Res.FSW, Res.r80Result, InVal }; \
3381 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
3382 } \
3383 } \
3384 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
3385 } \
3386 return RTEXITCODE_SUCCESS; \
3387} \
3388DUMP_ALL_FN(FpuLdI ## a_cBits, a_aSubTests)
3389#else
3390# define GEN_FPU_LOAD_INT(a_cBits, a_iTypeIn, a_szFmtIn, a_aSubTests, a_TestType)
3391#endif
3392
3393#define TEST_FPU_LOAD_INT(a_cBits, a_iTypeIn, a_szFmtIn, a_SubTestType, a_aSubTests, a_TestType) \
3394typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPULDR80FROMI ## a_cBits,(PCX86FXSTATE, PIEMFPURESULT, a_iTypeIn const *)); \
3395typedef FNIEMAIMPLFPULDR80FROMI ## a_cBits *PFNIEMAIMPLFPULDR80FROMI ## a_cBits; \
3396TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLFPULDR80FROMI ## a_cBits); \
3397\
3398static a_SubTestType a_aSubTests[] = \
3399{ \
3400 ENTRY_BIN(RT_CONCAT(fild_r80_from_i,a_cBits)) \
3401}; \
3402GEN_FPU_LOAD_INT(a_cBits, a_iTypeIn, a_szFmtIn, a_aSubTests, a_TestType) \
3403\
3404static void FpuLdI ## a_cBits ## Test(void) \
3405{ \
3406 X86FXSTATE State; \
3407 RT_ZERO(State); \
3408 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3409 { \
3410 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
3411 continue; \
3412 \
3413 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
3414 uint32_t const cTests = a_aSubTests[iFn].cTests; \
3415 PFNIEMAIMPLFPULDR80FROMI ## a_cBits pfn = a_aSubTests[iFn].pfn; \
3416 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
3417 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
3418 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
3419 { \
3420 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
3421 { \
3422 a_iTypeIn const iInVal = paTests[iTest].iInVal; \
3423 State.FCW = paTests[iTest].fFcw; \
3424 State.FSW = paTests[iTest].fFswIn; \
3425 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
3426 pfn(&State, &Res, &iInVal); \
3427 if ( Res.FSW != paTests[iTest].fFswOut \
3428 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult)) \
3429 RTTestFailed(g_hTest, "#%03u%s: fcw=%#06x fsw=%#06x in=" a_szFmtIn "\n" \
3430 "%s -> fsw=%#06x %s\n" \
3431 "%s expected %#06x %s%s%s (%s)\n", \
3432 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, paTests[iTest].iInVal, \
3433 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result), \
3434 iVar ? " " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].rdResult), \
3435 FswDiff(Res.FSW, paTests[iTest].fFswOut), \
3436 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult) ? " - val" : "", \
3437 FormatFcw(paTests[iTest].fFcw) ); \
3438 } \
3439 pfn = a_aSubTests[iFn].pfnNative; \
3440 } \
3441 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
3442 } \
3443}
3444
3445TEST_FPU_LOAD_INT(64, int64_t, "%RI64", FPU_LD_I64_T, g_aFpuLdU64, FPU_I64_IN_TEST_T)
3446TEST_FPU_LOAD_INT(32, int32_t, "%RI32", FPU_LD_I32_T, g_aFpuLdU32, FPU_I32_IN_TEST_T)
3447TEST_FPU_LOAD_INT(16, int16_t, "%RI16", FPU_LD_I16_T, g_aFpuLdU16, FPU_I16_IN_TEST_T)
3448
3449#ifdef TSTIEMAIMPL_WITH_GENERATOR
3450static RTEXITCODE FpuLdIntGenerate(uint32_t cTests, const char * const *papszNameFmts)
3451{
3452 RTEXITCODE rcExit = FpuLdI64Generate(cTests, papszNameFmts);
3453 if (rcExit == RTEXITCODE_SUCCESS)
3454 rcExit = FpuLdI32Generate(cTests, papszNameFmts);
3455 if (rcExit == RTEXITCODE_SUCCESS)
3456 rcExit = FpuLdI16Generate(cTests, papszNameFmts);
3457 return rcExit;
3458}
3459
3460static RTEXITCODE FpuLdIntDumpAll(const char * const *papszNameFmts)
3461{
3462 RTEXITCODE rcExit = FpuLdI64DumpAll(papszNameFmts);
3463 if (rcExit == RTEXITCODE_SUCCESS)
3464 rcExit = FpuLdI32DumpAll(papszNameFmts);
3465 if (rcExit == RTEXITCODE_SUCCESS)
3466 rcExit = FpuLdI16DumpAll(papszNameFmts);
3467 return rcExit;
3468}
3469#endif
3470
3471static void FpuLdIntTest(void)
3472{
3473 FpuLdI64Test();
3474 FpuLdI32Test();
3475 FpuLdI16Test();
3476}
3477
3478
3479/*
3480 * Load binary coded decimal values from memory.
3481 */
3482typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPULDR80FROMD80,(PCX86FXSTATE, PIEMFPURESULT, PCRTPBCD80U));
3483typedef FNIEMAIMPLFPULDR80FROMD80 *PFNIEMAIMPLFPULDR80FROMD80;
3484TYPEDEF_SUBTEST_TYPE(FPU_LD_D80_T, FPU_D80_IN_TEST_T, PFNIEMAIMPLFPULDR80FROMD80);
3485
3486static FPU_LD_D80_T g_aFpuLdD80[] =
3487{
3488 ENTRY_BIN(fld_r80_from_d80)
3489};
3490
3491#ifdef TSTIEMAIMPL_WITH_GENERATOR
3492static RTEXITCODE FpuLdD80Generate(uint32_t cTests, const char * const *papszNameFmts)
3493{
3494 X86FXSTATE State;
3495 RT_ZERO(State);
3496 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuLdD80); iFn++)
3497 {
3498 IEMBINARYOUTPUT BinOut;
3499 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuLdD80[iFn]), RTEXITCODE_FAILURE);
3500 for (uint32_t iTest = 0; iTest < cTests; iTest++)
3501 {
3502 State.FCW = RandFcw();
3503 State.FSW = RandFsw();
3504 RTPBCD80U InVal = RandD80Src(iTest);
3505
3506 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
3507 {
3508 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
3509 State.FCW = (State.FCW & ~X86_FCW_RC_MASK) | (iRounding << X86_FCW_RC_SHIFT);
3510 g_aFpuLdD80[iFn].pfn(&State, &Res, &InVal);
3511 FPU_D80_IN_TEST_T const Test = { State.FCW, State.FSW, Res.FSW, Res.r80Result, InVal };
3512 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
3513 }
3514 }
3515 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
3516 }
3517 return RTEXITCODE_SUCCESS;
3518}
3519DUMP_ALL_FN(FpuLdD80, g_aFpuLdD80)
3520#endif
3521
3522static void FpuLdD80Test(void)
3523{
3524 X86FXSTATE State;
3525 RT_ZERO(State);
3526 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuLdD80); iFn++)
3527 {
3528 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuLdD80[iFn]))
3529 continue;
3530
3531 FPU_D80_IN_TEST_T const * const paTests = g_aFpuLdD80[iFn].paTests;
3532 uint32_t const cTests = g_aFpuLdD80[iFn].cTests;
3533 PFNIEMAIMPLFPULDR80FROMD80 pfn = g_aFpuLdD80[iFn].pfn;
3534 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuLdD80[iFn]);
3535 if (!cTests) RTTestSkipped(g_hTest, "no tests");
3536 for (uint32_t iVar = 0; iVar < cVars; iVar++)
3537 {
3538 for (uint32_t iTest = 0; iTest < cTests; iTest++)
3539 {
3540 RTPBCD80U const InVal = paTests[iTest].InVal;
3541 State.FCW = paTests[iTest].fFcw;
3542 State.FSW = paTests[iTest].fFswIn;
3543 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
3544 pfn(&State, &Res, &InVal);
3545 if ( Res.FSW != paTests[iTest].fFswOut
3546 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult))
3547 RTTestFailed(g_hTest, "#%03u%s: fcw=%#06x fsw=%#06x in=%s\n"
3548 "%s -> fsw=%#06x %s\n"
3549 "%s expected %#06x %s%s%s (%s)\n",
3550 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
3551 FormatD80(&paTests[iTest].InVal),
3552 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result),
3553 iVar ? " " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].rdResult),
3554 FswDiff(Res.FSW, paTests[iTest].fFswOut),
3555 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].rdResult) ? " - val" : "",
3556 FormatFcw(paTests[iTest].fFcw) );
3557 }
3558 pfn = g_aFpuLdD80[iFn].pfnNative;
3559 }
3560
3561 FREE_DECOMPRESSED_TESTS(g_aFpuLdD80[iFn]);
3562 }
3563}
3564
3565
3566/*
3567 * Store floating point values to memory.
3568 */
3569#ifdef TSTIEMAIMPL_WITH_GENERATOR
3570static const RTFLOAT80U g_aFpuStR32Specials[] =
3571{
3572 RTFLOAT80U_INIT_C(0, 0xffffff8000000000, RTFLOAT80U_EXP_BIAS), /* near rounding with carry */
3573 RTFLOAT80U_INIT_C(1, 0xffffff8000000000, RTFLOAT80U_EXP_BIAS), /* near rounding with carry */
3574 RTFLOAT80U_INIT_C(0, 0xfffffe8000000000, RTFLOAT80U_EXP_BIAS), /* near rounding */
3575 RTFLOAT80U_INIT_C(1, 0xfffffe8000000000, RTFLOAT80U_EXP_BIAS), /* near rounding */
3576};
3577static const RTFLOAT80U g_aFpuStR64Specials[] =
3578{
3579 RTFLOAT80U_INIT_C(0, 0xfffffffffffffc00, RTFLOAT80U_EXP_BIAS), /* near rounding with carry */
3580 RTFLOAT80U_INIT_C(1, 0xfffffffffffffc00, RTFLOAT80U_EXP_BIAS), /* near rounding with carry */
3581 RTFLOAT80U_INIT_C(0, 0xfffffffffffff400, RTFLOAT80U_EXP_BIAS), /* near rounding */
3582 RTFLOAT80U_INIT_C(1, 0xfffffffffffff400, RTFLOAT80U_EXP_BIAS), /* near rounding */
3583 RTFLOAT80U_INIT_C(0, 0xd0b9e6fdda887400, 687 + RTFLOAT80U_EXP_BIAS), /* random example for this */
3584};
3585static const RTFLOAT80U g_aFpuStR80Specials[] =
3586{
3587 RTFLOAT80U_INIT_C(0, 0x8000000000000000, RTFLOAT80U_EXP_BIAS), /* placeholder */
3588};
3589# define GEN_FPU_STORE(a_cBits, a_rdType, a_aSubTests, a_TestType) \
3590static RTEXITCODE FpuStR ## a_cBits ## Generate(uint32_t cTests, const char * const *papszNameFmts) \
3591{ \
3592 uint32_t const cTotalTests = cTests + RT_ELEMENTS(g_aFpuStR ## a_cBits ## Specials); \
3593 X86FXSTATE State; \
3594 RT_ZERO(State); \
3595 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3596 { \
3597 IEMBINARYOUTPUT BinOut; \
3598 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
3599 for (uint32_t iTest = 0; iTest < cTotalTests; iTest++) \
3600 { \
3601 uint16_t const fFcw = RandFcw(); \
3602 State.FSW = RandFsw(); \
3603 RTFLOAT80U const InVal = iTest < cTests ? RandR80Src(iTest, a_cBits) \
3604 : g_aFpuStR ## a_cBits ## Specials[iTest - cTests]; \
3605 \
3606 for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
3607 { \
3608 /* PC doesn't influence these, so leave as is. */ \
3609 AssertCompile(X86_FCW_OM_BIT + 1 == X86_FCW_UM_BIT && X86_FCW_UM_BIT + 1 == X86_FCW_PM_BIT); \
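            /* (iMask >> 1) enumerates all eight OM/UM/PM mask-bit combinations; the skipped low bit \
               would flip every exception mask (see the commented-out line below). */ \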
3610 for (uint16_t iMask = 0; iMask < 16; iMask += 2 /*1*/) \
3611 { \
3612 uint16_t uFswOut = 0; \
3613 a_rdType OutVal; \
3614 RT_ZERO(OutVal); \
3615 memset(&OutVal, 0xfe, sizeof(OutVal)); \
3616 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM)) \
3617 | (iRounding << X86_FCW_RC_SHIFT); \
3618 /*if (iMask & 1) State.FCW ^= X86_FCW_MASK_ALL;*/ \
3619 State.FCW |= (iMask >> 1) << X86_FCW_OM_BIT; \
3620 a_aSubTests[iFn].pfn(&State, &uFswOut, &OutVal, &InVal); \
3621 a_TestType const Test = { State.FCW, State.FSW, uFswOut, InVal, OutVal }; \
3622 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
3623 } \
3624 } \
3625 } \
3626 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
3627 } \
3628 return RTEXITCODE_SUCCESS; \
3629} \
3630DUMP_ALL_FN(FpuStR ## a_cBits, a_aSubTests)
3631#else
3632# define GEN_FPU_STORE(a_cBits, a_rdType, a_aSubTests, a_TestType)
3633#endif
3634
3635#define TEST_FPU_STORE(a_cBits, a_rdType, a_SubTestType, a_aSubTests, a_TestType) \
3636typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOR ## a_cBits,(PCX86FXSTATE, uint16_t *, \
3637 PRTFLOAT ## a_cBits ## U, PCRTFLOAT80U)); \
3638typedef FNIEMAIMPLFPUSTR80TOR ## a_cBits *PFNIEMAIMPLFPUSTR80TOR ## a_cBits; \
3639TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLFPUSTR80TOR ## a_cBits); \
3640\
3641static a_SubTestType a_aSubTests[] = \
3642{ \
3643 ENTRY_BIN(RT_CONCAT(fst_r80_to_r,a_cBits)) \
3644}; \
3645GEN_FPU_STORE(a_cBits, a_rdType, a_aSubTests, a_TestType) \
3646\
3647static void FpuStR ## a_cBits ## Test(void) \
3648{ \
3649 X86FXSTATE State; \
3650 RT_ZERO(State); \
3651 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3652 { \
3653 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
3654 continue; \
3655 \
3656 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
3657 uint32_t const cTests = a_aSubTests[iFn].cTests; \
3658 PFNIEMAIMPLFPUSTR80TOR ## a_cBits pfn = a_aSubTests[iFn].pfn; \
3659 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
3660 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
3661 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
3662 { \
3663 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
3664 { \
3665 RTFLOAT80U const InVal = paTests[iTest].InVal; \
3666 uint16_t uFswOut = 0; \
3667 a_rdType OutVal; \
3668 RT_ZERO(OutVal); \
3669 memset(&OutVal, 0xfe, sizeof(OutVal)); \
3670 State.FCW = paTests[iTest].fFcw; \
3671 State.FSW = paTests[iTest].fFswIn; \
3672 pfn(&State, &uFswOut, &OutVal, &InVal); \
3673 if ( uFswOut != paTests[iTest].fFswOut \
3674 || !RTFLOAT ## a_cBits ## U_ARE_IDENTICAL(&OutVal, &paTests[iTest].OutVal)) \
3675 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n" \
3676 "%s -> fsw=%#06x %s\n" \
3677 "%s expected %#06x %s%s%s (%s)\n", \
3678 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
3679 FormatR80(&paTests[iTest].InVal), \
3680 iVar ? " " : "", uFswOut, FormatR ## a_cBits(&OutVal), \
3681 iVar ? " " : "", paTests[iTest].fFswOut, FormatR ## a_cBits(&paTests[iTest].OutVal), \
3682 FswDiff(uFswOut, paTests[iTest].fFswOut), \
3683 !RTFLOAT ## a_cBits ## U_ARE_IDENTICAL(&OutVal, &paTests[iTest].OutVal) ? " - val" : "", \
3684 FormatFcw(paTests[iTest].fFcw) ); \
3685 } \
3686 pfn = a_aSubTests[iFn].pfnNative; \
3687 } \
3688 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
3689 } \
3690}
3691
3692TEST_FPU_STORE(80, RTFLOAT80U, FPU_ST_R80_T, g_aFpuStR80, FPU_ST_R80_TEST_T)
3693TEST_FPU_STORE(64, RTFLOAT64U, FPU_ST_R64_T, g_aFpuStR64, FPU_ST_R64_TEST_T)
3694TEST_FPU_STORE(32, RTFLOAT32U, FPU_ST_R32_T, g_aFpuStR32, FPU_ST_R32_TEST_T)
3695
3696#ifdef TSTIEMAIMPL_WITH_GENERATOR
3697static RTEXITCODE FpuStMemGenerate(uint32_t cTests, const char * const *papszNameFmts)
3698{
3699 RTEXITCODE rcExit = FpuStR80Generate(cTests, papszNameFmts);
3700 if (rcExit == RTEXITCODE_SUCCESS)
3701 rcExit = FpuStR64Generate(cTests, papszNameFmts);
3702 if (rcExit == RTEXITCODE_SUCCESS)
3703 rcExit = FpuStR32Generate(cTests, papszNameFmts);
3704 return rcExit;
3705}
3706
3707static RTEXITCODE FpuStMemDumpAll(const char * const *papszNameFmts)
3708{
3709 RTEXITCODE rcExit = FpuStR80DumpAll(papszNameFmts);
3710 if (rcExit == RTEXITCODE_SUCCESS)
3711 rcExit = FpuStR64DumpAll(papszNameFmts);
3712 if (rcExit == RTEXITCODE_SUCCESS)
3713 rcExit = FpuStR32DumpAll(papszNameFmts);
3714 return rcExit;
3715}
3716#endif
3717
3718static void FpuStMemTest(void)
3719{
3720 FpuStR80Test();
3721 FpuStR64Test();
3722 FpuStR32Test();
3723}
3724
3725
3726/*
3727 * Store integer values to memory or register.
3728 */
3729TYPEDEF_SUBTEST_TYPE(FPU_ST_I16_T, FPU_ST_I16_TEST_T, PFNIEMAIMPLFPUSTR80TOI16);
3730TYPEDEF_SUBTEST_TYPE(FPU_ST_I32_T, FPU_ST_I32_TEST_T, PFNIEMAIMPLFPUSTR80TOI32);
3731TYPEDEF_SUBTEST_TYPE(FPU_ST_I64_T, FPU_ST_I64_TEST_T, PFNIEMAIMPLFPUSTR80TOI64);
3732
3733static FPU_ST_I16_T g_aFpuStI16[] =
3734{
3735 ENTRY_BIN(fist_r80_to_i16),
3736 ENTRY_BIN_AMD( fistt_r80_to_i16, 0),
3737 ENTRY_BIN_INTEL(fistt_r80_to_i16, 0),
3738};
3739static FPU_ST_I32_T g_aFpuStI32[] =
3740{
3741 ENTRY_BIN(fist_r80_to_i32),
3742 ENTRY_BIN(fistt_r80_to_i32),
3743};
3744static FPU_ST_I64_T g_aFpuStI64[] =
3745{
3746 ENTRY_BIN(fist_r80_to_i64),
3747 ENTRY_BIN(fistt_r80_to_i64),
3748};
3749
3750#ifdef TSTIEMAIMPL_WITH_GENERATOR
3751static const RTFLOAT80U g_aFpuStI16Specials[] = /* 16-bit variant borrows properties from the 32-bit one, thus all this stuff. */
3752{
3753 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 13 + RTFLOAT80U_EXP_BIAS),
3754 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 13 + RTFLOAT80U_EXP_BIAS),
3755 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 14 + RTFLOAT80U_EXP_BIAS),
3756 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 14 + RTFLOAT80U_EXP_BIAS),
3757 RTFLOAT80U_INIT_C(0, 0x8000080000000000, 14 + RTFLOAT80U_EXP_BIAS),
3758 RTFLOAT80U_INIT_C(1, 0x8000080000000000, 14 + RTFLOAT80U_EXP_BIAS),
3759 RTFLOAT80U_INIT_C(0, 0x8000100000000000, 14 + RTFLOAT80U_EXP_BIAS),
3760 RTFLOAT80U_INIT_C(1, 0x8000100000000000, 14 + RTFLOAT80U_EXP_BIAS),
3761 RTFLOAT80U_INIT_C(0, 0x8000200000000000, 14 + RTFLOAT80U_EXP_BIAS),
3762 RTFLOAT80U_INIT_C(1, 0x8000200000000000, 14 + RTFLOAT80U_EXP_BIAS),
3763 RTFLOAT80U_INIT_C(0, 0x8000400000000000, 14 + RTFLOAT80U_EXP_BIAS),
3764 RTFLOAT80U_INIT_C(1, 0x8000400000000000, 14 + RTFLOAT80U_EXP_BIAS),
3765 RTFLOAT80U_INIT_C(0, 0x8000800000000000, 14 + RTFLOAT80U_EXP_BIAS),
3766 RTFLOAT80U_INIT_C(1, 0x8000800000000000, 14 + RTFLOAT80U_EXP_BIAS),
3767 RTFLOAT80U_INIT_C(1, 0x8000ffffffffffff, 14 + RTFLOAT80U_EXP_BIAS),
3768 RTFLOAT80U_INIT_C(0, 0x8001000000000000, 14 + RTFLOAT80U_EXP_BIAS),
3769 RTFLOAT80U_INIT_C(1, 0x8001000000000000, 14 + RTFLOAT80U_EXP_BIAS),
3770 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 14 + RTFLOAT80U_EXP_BIAS),
3771 RTFLOAT80U_INIT_C(1, 0xfffffffffffffff0, 14 + RTFLOAT80U_EXP_BIAS),
3772 RTFLOAT80U_INIT_C(0, 0xffff800000000000, 14 + RTFLOAT80U_EXP_BIAS),
3773 RTFLOAT80U_INIT_C(0, 0xffff000000000000, 14 + RTFLOAT80U_EXP_BIAS), /* overflow to min/nan */
3774 RTFLOAT80U_INIT_C(0, 0xfffe000000000000, 14 + RTFLOAT80U_EXP_BIAS),
3775 RTFLOAT80U_INIT_C(1, 0xffff800000000000, 14 + RTFLOAT80U_EXP_BIAS),
3776 RTFLOAT80U_INIT_C(1, 0xffff000000000000, 14 + RTFLOAT80U_EXP_BIAS), /* min */
3777 RTFLOAT80U_INIT_C(1, 0xfffe000000000000, 14 + RTFLOAT80U_EXP_BIAS),
3778 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 15 + RTFLOAT80U_EXP_BIAS),
3779 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 15 + RTFLOAT80U_EXP_BIAS),
3780 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 16 + RTFLOAT80U_EXP_BIAS),
3781 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 17 + RTFLOAT80U_EXP_BIAS),
3782 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 20 + RTFLOAT80U_EXP_BIAS),
3783 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 24 + RTFLOAT80U_EXP_BIAS),
3784 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 28 + RTFLOAT80U_EXP_BIAS),
3785 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 30 + RTFLOAT80U_EXP_BIAS),
3786 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 30 + RTFLOAT80U_EXP_BIAS),
3787 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 30 + RTFLOAT80U_EXP_BIAS),
3788 RTFLOAT80U_INIT_C(1, 0xfffffffffffffff0, 30 + RTFLOAT80U_EXP_BIAS),
3789 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 31 + RTFLOAT80U_EXP_BIAS),
3790 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 31 + RTFLOAT80U_EXP_BIAS),
3791 RTFLOAT80U_INIT_C(0, 0x8000000000000001, 31 + RTFLOAT80U_EXP_BIAS),
3792 RTFLOAT80U_INIT_C(1, 0x8000000000000001, 31 + RTFLOAT80U_EXP_BIAS),
3793 RTFLOAT80U_INIT_C(0, 0x8000ffffffffffff, 31 + RTFLOAT80U_EXP_BIAS),
3794 RTFLOAT80U_INIT_C(1, 0x8000ffffffffffff, 31 + RTFLOAT80U_EXP_BIAS),
3795 RTFLOAT80U_INIT_C(0, 0x8001000000000000, 31 + RTFLOAT80U_EXP_BIAS),
3796 RTFLOAT80U_INIT_C(1, 0x8001000000000000, 31 + RTFLOAT80U_EXP_BIAS),
3797 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 31 + RTFLOAT80U_EXP_BIAS),
3798 RTFLOAT80U_INIT_C(1, 0xfffffffffffffff0, 31 + RTFLOAT80U_EXP_BIAS),
3799 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 32 + RTFLOAT80U_EXP_BIAS),
3800};
3801static const RTFLOAT80U g_aFpuStI32Specials[] =
3802{
3803 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 30 + RTFLOAT80U_EXP_BIAS),
3804 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 30 + RTFLOAT80U_EXP_BIAS),
3805 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 30 + RTFLOAT80U_EXP_BIAS), /* overflow to min/nan */
3806 RTFLOAT80U_INIT_C(1, 0xfffffffffffffff0, 30 + RTFLOAT80U_EXP_BIAS), /* min */
3807 RTFLOAT80U_INIT_C(0, 0xffffffff80000000, 30 + RTFLOAT80U_EXP_BIAS), /* overflow to min/nan */
3808 RTFLOAT80U_INIT_C(1, 0xffffffff80000000, 30 + RTFLOAT80U_EXP_BIAS), /* min */
3809 RTFLOAT80U_INIT_C(0, 0xffffffff00000000, 30 + RTFLOAT80U_EXP_BIAS), /* overflow to min/nan */
3810 RTFLOAT80U_INIT_C(1, 0xffffffff00000000, 30 + RTFLOAT80U_EXP_BIAS), /* min */
3811 RTFLOAT80U_INIT_C(0, 0xfffffffe00000000, 30 + RTFLOAT80U_EXP_BIAS),
3812 RTFLOAT80U_INIT_C(1, 0xfffffffe00000000, 30 + RTFLOAT80U_EXP_BIAS),
3813 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 31 + RTFLOAT80U_EXP_BIAS),
3814 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 31 + RTFLOAT80U_EXP_BIAS),
3815 RTFLOAT80U_INIT_C(0, 0x8000000000000001, 31 + RTFLOAT80U_EXP_BIAS),
3816 RTFLOAT80U_INIT_C(1, 0x8000000000000001, 31 + RTFLOAT80U_EXP_BIAS),
3817 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 31 + RTFLOAT80U_EXP_BIAS),
3818 RTFLOAT80U_INIT_C(1, 0xfffffffffffffff0, 31 + RTFLOAT80U_EXP_BIAS),
3819};
3820static const RTFLOAT80U g_aFpuStI64Specials[] =
3821{
3822 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 61 + RTFLOAT80U_EXP_BIAS),
3823 RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, 61 + RTFLOAT80U_EXP_BIAS),
3824 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 62 + RTFLOAT80U_EXP_BIAS),
3825 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 62 + RTFLOAT80U_EXP_BIAS),
3826 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 62 + RTFLOAT80U_EXP_BIAS),
3827 RTFLOAT80U_INIT_C(1, 0xfffffffffffffff0, 62 + RTFLOAT80U_EXP_BIAS),
3828 RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, 62 + RTFLOAT80U_EXP_BIAS), /* overflow to min/nan */
3829 RTFLOAT80U_INIT_C(1, 0xffffffffffffffff, 62 + RTFLOAT80U_EXP_BIAS), /* min */
3830 RTFLOAT80U_INIT_C(0, 0xfffffffffffffffe, 62 + RTFLOAT80U_EXP_BIAS),
3831 RTFLOAT80U_INIT_C(1, 0xfffffffffffffffe, 62 + RTFLOAT80U_EXP_BIAS),
3832 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 63 + RTFLOAT80U_EXP_BIAS),
3833 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 63 + RTFLOAT80U_EXP_BIAS),
3834 RTFLOAT80U_INIT_C(0, 0x8000000000000001, 63 + RTFLOAT80U_EXP_BIAS),
3835 RTFLOAT80U_INIT_C(1, 0x8000000000000001, 63 + RTFLOAT80U_EXP_BIAS),
3836 RTFLOAT80U_INIT_C(0, 0x8000000000000002, 63 + RTFLOAT80U_EXP_BIAS),
3837 RTFLOAT80U_INIT_C(1, 0x8000000000000002, 63 + RTFLOAT80U_EXP_BIAS),
3838 RTFLOAT80U_INIT_C(0, 0xfffffffffffffff0, 63 + RTFLOAT80U_EXP_BIAS),
3839};
3840
3841# define GEN_FPU_STORE_INT(a_cBits, a_iType, a_szFmt, a_aSubTests, a_TestType) \
3842static RTEXITCODE FpuStI ## a_cBits ## Generate(uint32_t cTests, const char * const *papszNameFmts) \
3843{ \
3844 X86FXSTATE State; \
3845 RT_ZERO(State); \
3846 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3847 { \
3848 PFNIEMAIMPLFPUSTR80TOI ## a_cBits const pfn = a_aSubTests[iFn].pfnNative \
3849 ? a_aSubTests[iFn].pfnNative : a_aSubTests[iFn].pfn; \
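        /* Only generate data for entries whose CPU flavour (idxCpuEflFlavour) matches the host; \
           flavour-agnostic entries are always generated. */ \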
3850 if ( a_aSubTests[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE \
3851 && a_aSubTests[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour) \
3852 continue; \
3853 \
3854 IEMBINARYOUTPUT BinOut; \
3855 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
3856 uint32_t const cTotalTests = cTests + RT_ELEMENTS(g_aFpuStI ## a_cBits ## Specials); \
3857 for (uint32_t iTest = 0; iTest < cTotalTests; iTest++) \
3858 { \
3859 uint16_t const fFcw = RandFcw(); \
3860 State.FSW = RandFsw(); \
3861 RTFLOAT80U const InVal = iTest < cTests ? RandR80Src(iTest, a_cBits, true) \
3862 : g_aFpuStI ## a_cBits ## Specials[iTest - cTests]; \
3863 \
3864 for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
3865 { \
3866 /* PC doesn't influence these, so leave as is. */ \
3867 AssertCompile(X86_FCW_OM_BIT + 1 == X86_FCW_UM_BIT && X86_FCW_UM_BIT + 1 == X86_FCW_PM_BIT); \
3868 for (uint16_t iMask = 0; iMask < 16; iMask += 2 /*1*/) \
3869 { \
3870 uint16_t uFswOut = 0; \
3871 a_iType iOutVal = ~(a_iType)2; \
3872 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM)) \
3873 | (iRounding << X86_FCW_RC_SHIFT); \
3874 /*if (iMask & 1) State.FCW ^= X86_FCW_MASK_ALL;*/ \
3875 State.FCW |= (iMask >> 1) << X86_FCW_OM_BIT; \
3876 pfn(&State, &uFswOut, &iOutVal, &InVal); \
3877 a_TestType const Test = { State.FCW, State.FSW, uFswOut, InVal, iOutVal }; \
3878 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
3879 } \
3880 } \
3881 } \
3882 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
3883 } \
3884 return RTEXITCODE_SUCCESS; \
3885} \
3886DUMP_ALL_FN(FpuStI ## a_cBits, a_aSubTests)
3887#else
3888# define GEN_FPU_STORE_INT(a_cBits, a_iType, a_szFmt, a_aSubTests, a_TestType)
3889#endif
3890
3891#define TEST_FPU_STORE_INT(a_cBits, a_iType, a_szFmt, a_SubTestType, a_aSubTests, a_TestType) \
3892GEN_FPU_STORE_INT(a_cBits, a_iType, a_szFmt, a_aSubTests, a_TestType) \
3893\
3894static void FpuStI ## a_cBits ## Test(void) \
3895{ \
3896 X86FXSTATE State; \
3897 RT_ZERO(State); \
3898 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
3899 { \
3900 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
3901 continue; \
3902 \
3903 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
3904 uint32_t const cTests = a_aSubTests[iFn].cTests; \
3905 PFNIEMAIMPLFPUSTR80TOI ## a_cBits pfn = a_aSubTests[iFn].pfn; \
3906 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
3907 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
3908 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
3909 { \
3910 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
3911 { \
3912 RTFLOAT80U const InVal = paTests[iTest].InVal; \
3913 uint16_t uFswOut = 0; \
3914 a_iType iOutVal = ~(a_iType)2; \
3915 State.FCW = paTests[iTest].fFcw; \
3916 State.FSW = paTests[iTest].fFswIn; \
3917 pfn(&State, &uFswOut, &iOutVal, &InVal); \
3918 if ( uFswOut != paTests[iTest].fFswOut \
3919 || iOutVal != paTests[iTest].iOutVal) \
3920 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n" \
3921 "%s -> fsw=%#06x " a_szFmt "\n" \
3922 "%s expected %#06x " a_szFmt "%s%s (%s)\n", \
3923 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
3924 FormatR80(&paTests[iTest].InVal), \
3925 iVar ? " " : "", uFswOut, iOutVal, \
3926 iVar ? " " : "", paTests[iTest].fFswOut, paTests[iTest].iOutVal, \
3927 FswDiff(uFswOut, paTests[iTest].fFswOut), \
3928 iOutVal != paTests[iTest].iOutVal ? " - val" : "", FormatFcw(paTests[iTest].fFcw) ); \
3929 } \
3930 pfn = a_aSubTests[iFn].pfnNative; \
3931 } \
3932 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
3933 } \
3934}
3935
3936//fistt_r80_to_i16 diffs for AMD, of course :-)
3937
3938TEST_FPU_STORE_INT(64, int64_t, "%RI64", FPU_ST_I64_T, g_aFpuStI64, FPU_ST_I64_TEST_T)
3939TEST_FPU_STORE_INT(32, int32_t, "%RI32", FPU_ST_I32_T, g_aFpuStI32, FPU_ST_I32_TEST_T)
3940TEST_FPU_STORE_INT(16, int16_t, "%RI16", FPU_ST_I16_T, g_aFpuStI16, FPU_ST_I16_TEST_T)
3941
3942#ifdef TSTIEMAIMPL_WITH_GENERATOR
3943static RTEXITCODE FpuStIntGenerate(uint32_t cTests, const char * const *papszNameFmts)
3944{
3945 RTEXITCODE rcExit = FpuStI64Generate(cTests, papszNameFmts);
3946 if (rcExit == RTEXITCODE_SUCCESS)
3947 rcExit = FpuStI32Generate(cTests, papszNameFmts);
3948 if (rcExit == RTEXITCODE_SUCCESS)
3949 rcExit = FpuStI16Generate(cTests, papszNameFmts);
3950 return rcExit;
3951}
3952static RTEXITCODE FpuStIntDumpAll(const char * const *papszNameFmts)
3953{
3954 RTEXITCODE rcExit = FpuStI64DumpAll(papszNameFmts);
3955 if (rcExit == RTEXITCODE_SUCCESS)
3956 rcExit = FpuStI32DumpAll(papszNameFmts);
3957 if (rcExit == RTEXITCODE_SUCCESS)
3958 rcExit = FpuStI16DumpAll(papszNameFmts);
3959 return rcExit;
3960}
3961#endif
3962
3963static void FpuStIntTest(void)
3964{
3965 FpuStI64Test();
3966 FpuStI32Test();
3967 FpuStI16Test();
3968}
3969
3970
3971/*
3972 * Store as packed BCD value (memory).
3973 */
3974typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOD80,(PCX86FXSTATE, uint16_t *, PRTPBCD80U, PCRTFLOAT80U));
3975typedef FNIEMAIMPLFPUSTR80TOD80 *PFNIEMAIMPLFPUSTR80TOD80;
3976TYPEDEF_SUBTEST_TYPE(FPU_ST_D80_T, FPU_ST_D80_TEST_T, PFNIEMAIMPLFPUSTR80TOD80);
3977
3978static FPU_ST_D80_T g_aFpuStD80[] =
3979{
3980 ENTRY_BIN(fst_r80_to_d80),
3981};
3982
3983#ifdef TSTIEMAIMPL_WITH_GENERATOR
3984static RTEXITCODE FpuStD80Generate(uint32_t cTests, const char * const *papszNameFmts)
3985{
3986 static RTFLOAT80U const s_aSpecials[] =
3987 {
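        /* All of these are around +/-(10^18 - 1) = 0x0DE0B6B3A763FFFF, the largest magnitude an
           18-digit packed BCD value can represent. */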
3988 RTFLOAT80U_INIT_C(0, 0xde0b6b3a763fffe0, RTFLOAT80U_EXP_BIAS + 59), /* 1 below max */
3989 RTFLOAT80U_INIT_C(1, 0xde0b6b3a763fffe0, RTFLOAT80U_EXP_BIAS + 59), /* 1 above min */
3990 RTFLOAT80U_INIT_C(0, 0xde0b6b3a763ffff0, RTFLOAT80U_EXP_BIAS + 59), /* exact max */
3991 RTFLOAT80U_INIT_C(1, 0xde0b6b3a763ffff0, RTFLOAT80U_EXP_BIAS + 59), /* exact min */
3992 RTFLOAT80U_INIT_C(0, 0xde0b6b3a763fffff, RTFLOAT80U_EXP_BIAS + 59), /* max & all rounded off bits set */
3993 RTFLOAT80U_INIT_C(1, 0xde0b6b3a763fffff, RTFLOAT80U_EXP_BIAS + 59), /* min & all rounded off bits set */
3994 RTFLOAT80U_INIT_C(0, 0xde0b6b3a763ffff8, RTFLOAT80U_EXP_BIAS + 59), /* max & some rounded off bits set */
3995 RTFLOAT80U_INIT_C(1, 0xde0b6b3a763ffff8, RTFLOAT80U_EXP_BIAS + 59), /* min & some rounded off bits set */
3996 RTFLOAT80U_INIT_C(0, 0xde0b6b3a763ffff1, RTFLOAT80U_EXP_BIAS + 59), /* max & some other rounded off bits set */
3997 RTFLOAT80U_INIT_C(1, 0xde0b6b3a763ffff1, RTFLOAT80U_EXP_BIAS + 59), /* min & some other rounded off bits set */
3998 RTFLOAT80U_INIT_C(0, 0xde0b6b3a76400000, RTFLOAT80U_EXP_BIAS + 59), /* 1 above max */
3999 RTFLOAT80U_INIT_C(1, 0xde0b6b3a76400000, RTFLOAT80U_EXP_BIAS + 59), /* 1 below min */
4000 };
4001
4002 X86FXSTATE State;
4003 RT_ZERO(State);
4004 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuStD80); iFn++)
4005 {
4006 IEMBINARYOUTPUT BinOut;
4007 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuStD80[iFn]), RTEXITCODE_FAILURE);
4008 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
4009 {
4010 uint16_t const fFcw = RandFcw();
4011 State.FSW = RandFsw();
4012 RTFLOAT80U const InVal = iTest < cTests ? RandR80Src(iTest, 59, true) : s_aSpecials[iTest - cTests];
4013
4014 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
4015 {
4016 /* PC doesn't influence these, so leave as is. */
4017 AssertCompile(X86_FCW_OM_BIT + 1 == X86_FCW_UM_BIT && X86_FCW_UM_BIT + 1 == X86_FCW_PM_BIT);
4018 for (uint16_t iMask = 0; iMask < 16; iMask += 2 /*1*/)
4019 {
4020 uint16_t uFswOut = 0;
4021 RTPBCD80U OutVal = RTPBCD80U_INIT_ZERO(0);
4022 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_OM | X86_FCW_UM | X86_FCW_PM))
4023 | (iRounding << X86_FCW_RC_SHIFT);
4024 /*if (iMask & 1) State.FCW ^= X86_FCW_MASK_ALL;*/
4025 State.FCW |= (iMask >> 1) << X86_FCW_OM_BIT;
4026 g_aFpuStD80[iFn].pfn(&State, &uFswOut, &OutVal, &InVal);
4027 FPU_ST_D80_TEST_T const Test = { State.FCW, State.FSW, uFswOut, InVal, OutVal };
4028 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
4029 }
4030 }
4031 }
4032 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
4033 }
4034 return RTEXITCODE_SUCCESS;
4035}
4036DUMP_ALL_FN(FpuStD80, g_aFpuStD80)
4037#endif
4038
4039
4040static void FpuStD80Test(void)
4041{
4042 X86FXSTATE State;
4043 RT_ZERO(State);
4044 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuStD80); iFn++)
4045 {
4046 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuStD80[iFn]))
4047 continue;
4048
4049 FPU_ST_D80_TEST_T const * const paTests = g_aFpuStD80[iFn].paTests;
4050 uint32_t const cTests = g_aFpuStD80[iFn].cTests;
4051 PFNIEMAIMPLFPUSTR80TOD80 pfn = g_aFpuStD80[iFn].pfn;
4052 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuStD80[iFn]);
4053 if (!cTests) RTTestSkipped(g_hTest, "no tests");
4054 for (uint32_t iVar = 0; iVar < cVars; iVar++)
4055 {
4056 for (uint32_t iTest = 0; iTest < cTests; iTest++)
4057 {
4058 RTFLOAT80U const InVal = paTests[iTest].InVal;
4059 uint16_t uFswOut = 0;
4060 RTPBCD80U OutVal = RTPBCD80U_INIT_ZERO(0);
4061 State.FCW = paTests[iTest].fFcw;
4062 State.FSW = paTests[iTest].fFswIn;
4063 pfn(&State, &uFswOut, &OutVal, &InVal);
4064 if ( uFswOut != paTests[iTest].fFswOut
4065 || !RTPBCD80U_ARE_IDENTICAL(&OutVal, &paTests[iTest].OutVal))
4066 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n"
4067 "%s -> fsw=%#06x %s\n"
4068 "%s expected %#06x %s%s%s (%s)\n",
4069 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
4070 FormatR80(&paTests[iTest].InVal),
4071 iVar ? " " : "", uFswOut, FormatD80(&OutVal),
4072 iVar ? " " : "", paTests[iTest].fFswOut, FormatD80(&paTests[iTest].OutVal),
4073 FswDiff(uFswOut, paTests[iTest].fFswOut),
4074 !RTPBCD80U_ARE_IDENTICAL(&OutVal, &paTests[iTest].OutVal) ? " - val" : "",
4075 FormatFcw(paTests[iTest].fFcw) );
4076 }
4077 pfn = g_aFpuStD80[iFn].pfnNative;
4078 }
4079
4080 FREE_DECOMPRESSED_TESTS(g_aFpuStD80[iFn]);
4081 }
4082}
4083
4084
4085
4086/*********************************************************************************************************************************
4087* x87 FPU Binary Operations *
4088*********************************************************************************************************************************/
4089
4090/*
4091 * Binary FPU operations on two 80-bit floating point values.
4092 */
4093TYPEDEF_SUBTEST_TYPE(FPU_BINARY_R80_T, FPU_BINARY_R80_TEST_T, PFNIEMAIMPLFPUR80);
4094enum { kFpuBinaryHint_fprem = 1, };
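/* kFpuBinaryHint_fprem tells the generator to bias operand exponents and run the partial-remainder
   sequence for fprem/fprem1 (see FpuBinaryR80Generate). */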
4095
4096static FPU_BINARY_R80_T g_aFpuBinaryR80[] =
4097{
4098 ENTRY_BIN(fadd_r80_by_r80),
4099 ENTRY_BIN(fsub_r80_by_r80),
4100 ENTRY_BIN(fsubr_r80_by_r80),
4101 ENTRY_BIN(fmul_r80_by_r80),
4102 ENTRY_BIN(fdiv_r80_by_r80),
4103 ENTRY_BIN(fdivr_r80_by_r80),
4104 ENTRY_BIN_EX(fprem_r80_by_r80, kFpuBinaryHint_fprem),
4105 ENTRY_BIN_EX(fprem1_r80_by_r80, kFpuBinaryHint_fprem),
4106 ENTRY_BIN(fscale_r80_by_r80),
4107 ENTRY_BIN_AMD( fpatan_r80_by_r80, 0), // C1 and rounding differs on AMD
4108 ENTRY_BIN_INTEL(fpatan_r80_by_r80, 0), // C1 and rounding differs on AMD
4109 ENTRY_BIN_AMD( fyl2x_r80_by_r80, 0), // C1 and rounding differs on AMD
4110 ENTRY_BIN_INTEL(fyl2x_r80_by_r80, 0), // C1 and rounding differs on AMD
4111 ENTRY_BIN_AMD( fyl2xp1_r80_by_r80, 0), // C1 and rounding differs on AMD
4112 ENTRY_BIN_INTEL(fyl2xp1_r80_by_r80, 0), // C1 and rounding differs on AMD
4113};
4114
4115#ifdef TSTIEMAIMPL_WITH_GENERATOR
4116static RTEXITCODE FpuBinaryR80Generate(uint32_t cTests, const char * const *papszNameFmts)
4117{
4118 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
4119
4120 static struct { RTFLOAT80U Val1, Val2; } const s_aSpecials[] =
4121 {
4122 { RTFLOAT80U_INIT_C(1, 0xdd762f07f2e80eef, 30142), /* causes weird overflows with DOWN and NEAR rounding. */
4123 RTFLOAT80U_INIT_C(1, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1) },
4124 { RTFLOAT80U_INIT_ZERO(0), /* causes weird overflows with UP and NEAR rounding when precision is lower than 64. */
4125 RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1) },
4126 { RTFLOAT80U_INIT_ZERO(0), /* minus variant */
4127 RTFLOAT80U_INIT_C(1, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1) },
4128 { RTFLOAT80U_INIT_C(0, 0xcef238bb9a0afd86, 577 + RTFLOAT80U_EXP_BIAS), /* for fprem and fprem1, max sequence length */
4129 RTFLOAT80U_INIT_C(0, 0xf11684ec0beaad94, 1 + RTFLOAT80U_EXP_BIAS) },
4130 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, -13396 + RTFLOAT80U_EXP_BIAS), /* for fdiv. We missed PE. */
4131 RTFLOAT80U_INIT_C(1, 0xffffffffffffffff, 16383 + RTFLOAT80U_EXP_BIAS) },
4132 { RTFLOAT80U_INIT_C(0, 0x8000000000000000, 1 + RTFLOAT80U_EXP_BIAS), /* for fprem/fprem1 */
4133 RTFLOAT80U_INIT_C(0, 0xe000000000000000, 0 + RTFLOAT80U_EXP_BIAS) },
4134 { RTFLOAT80U_INIT_C(0, 0x8000000000000000, 1 + RTFLOAT80U_EXP_BIAS), /* for fprem/fprem1 */
4135 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 0 + RTFLOAT80U_EXP_BIAS) },
4136 /* fscale: This may seriously increase the exponent, and it turns out overflow and underflow behaviour changes
4137 once RTFLOAT80U_EXP_BIAS_ADJUST is exceeded. */
4138 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^1 */
4139 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 0 + RTFLOAT80U_EXP_BIAS) },
4140 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^64 */
4141 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 6 + RTFLOAT80U_EXP_BIAS) },
4142 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^1024 */
4143 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 10 + RTFLOAT80U_EXP_BIAS) },
4144 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^4096 */
4145 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 12 + RTFLOAT80U_EXP_BIAS) },
4146 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^16384 */
4147 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: 49150 */
4148 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^24576 (RTFLOAT80U_EXP_BIAS_ADJUST) */
4149 RTFLOAT80U_INIT_C(0, 0xc000000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: 57342 - within 10980XE range */
4150 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^24577 */
4151 RTFLOAT80U_INIT_C(0, 0xc002000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: 57343 - outside 10980XE range, behaviour changes! */
4152 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^32768 - result is within range on 10980XE */
4153 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 15 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: 65534 */
4154 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^65536 */
4155 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 16 + RTFLOAT80U_EXP_BIAS) },
4156 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^1048576 */
4157 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 20 + RTFLOAT80U_EXP_BIAS) },
4158 { RTFLOAT80U_INIT_C(0, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^16777216 */
4159 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 24 + RTFLOAT80U_EXP_BIAS) },
4160 { RTFLOAT80U_INIT_C(0, 0x8000000000000000, 1), /* for fscale: min * 2^-24576 (RTFLOAT80U_EXP_BIAS_ADJUST) */
4161 RTFLOAT80U_INIT_C(1, 0xc000000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: -24575 - within 10980XE range */
4162 { RTFLOAT80U_INIT_C(0, 0x8000000000000000, 1), /* for fscale: max * 2^-24577 (RTFLOAT80U_EXP_BIAS_ADJUST) */
4163 RTFLOAT80U_INIT_C(1, 0xc002000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: -24576 - outside 10980XE range, behaviour changes! */
4164 /* fscale: Negative variants for the essentials of the above. */
4165 { RTFLOAT80U_INIT_C(1, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^24576 (RTFLOAT80U_EXP_BIAS_ADJUST) */
4166 RTFLOAT80U_INIT_C(0, 0xc000000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: 57342 - within 10980XE range */
4167 { RTFLOAT80U_INIT_C(1, 0xffffffffffffffff, RTFLOAT80U_EXP_MAX - 1), /* for fscale: max * 2^24577 */
4168 RTFLOAT80U_INIT_C(0, 0xc002000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: 57343 - outside 10980XE range, behaviour changes! */
4169 { RTFLOAT80U_INIT_C(1, 0x8000000000000000, 1), /* for fscale: min * 2^-24576 (RTFLOAT80U_EXP_BIAS_ADJUST) */
4170 RTFLOAT80U_INIT_C(1, 0xc000000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: -57342 - within 10980XE range */
4171 { RTFLOAT80U_INIT_C(1, 0x8000000000000000, 1), /* for fscale: max * 2^-24576 (RTFLOAT80U_EXP_BIAS_ADJUST) */
4172 RTFLOAT80U_INIT_C(1, 0xc002000000000000, 14 + RTFLOAT80U_EXP_BIAS) }, /* resulting exponent: -57343 - outside 10980XE range, behaviour changes! */
4173 /* fscale: Some fun with denormals and pseudo-denormals. */
4174 { RTFLOAT80U_INIT_C(0, 0x0800000000000000, 0), /* for fscale: max * 2^-4 */
4175 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 2 + RTFLOAT80U_EXP_BIAS) },
4176 { RTFLOAT80U_INIT_C(0, 0x0800000000000000, 0), /* for fscale: max * 2^+1 */
4177 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 0 + RTFLOAT80U_EXP_BIAS) },
4178 { RTFLOAT80U_INIT_C(0, 0x0800000000000000, 0), RTFLOAT80U_INIT_ZERO(0) }, /* for fscale: max * 2^+0 */
4179 { RTFLOAT80U_INIT_C(0, 0x0000000000000008, 0), /* for fscale: max * 2^-4 => underflow */
4180 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 2 + RTFLOAT80U_EXP_BIAS) },
4181 { RTFLOAT80U_INIT_C(0, 0x8005000300020001, 0), RTFLOAT80U_INIT_ZERO(0) }, /* pseudo-normal number * 2^+0. */
4182 { RTFLOAT80U_INIT_C(1, 0x8005000300020001, 0), RTFLOAT80U_INIT_ZERO(0) }, /* pseudo-normal number * 2^+0. */
4183 { RTFLOAT80U_INIT_C(0, 0x8005000300020001, 0), /* pseudo-normal number * 2^-4 */
4184 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 2 + RTFLOAT80U_EXP_BIAS) },
4185 { RTFLOAT80U_INIT_C(0, 0x8005000300020001, 0), /* pseudo-normal number * 2^+0 */
4186 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 0 + RTFLOAT80U_EXP_BIAS) },
4187 { RTFLOAT80U_INIT_C(0, 0x8005000300020001, 0), /* pseudo-normal number * 2^+1 */
4188 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 1 + RTFLOAT80U_EXP_BIAS) },
4189 };
4190
4191 X86FXSTATE State;
4192 RT_ZERO(State);
4193 uint32_t cMinNormalPairs = (cTests - 144) / 4;
4194 uint32_t cMinTargetRangeInputs = cMinNormalPairs / 2;
4195 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuBinaryR80); iFn++)
4196 {
4197 PFNIEMAIMPLFPUR80 const pfn = g_aFpuBinaryR80[iFn].pfnNative ? g_aFpuBinaryR80[iFn].pfnNative : g_aFpuBinaryR80[iFn].pfn;
4198 if ( g_aFpuBinaryR80[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
4199 && g_aFpuBinaryR80[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
4200 continue;
4201
4202 IEMBINARYOUTPUT BinOut;
4203 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuBinaryR80[iFn]), RTEXITCODE_FAILURE);
4204 uint32_t cNormalInputPairs = 0;
4205 uint32_t cTargetRangeInputs = 0;
4206 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
4207 {
4208 RTFLOAT80U InVal1 = iTest < cTests ? RandR80Src1(iTest) : s_aSpecials[iTest - cTests].Val1;
4209 RTFLOAT80U InVal2 = iTest < cTests ? RandR80Src2(iTest) : s_aSpecials[iTest - cTests].Val2;
4210 bool fTargetRange = false;
4211 if (RTFLOAT80U_IS_NORMAL(&InVal1) && RTFLOAT80U_IS_NORMAL(&InVal2))
4212 {
4213 cNormalInputPairs++;
4214 if ( g_aFpuBinaryR80[iFn].uExtra == kFpuBinaryHint_fprem
4215 && (uint32_t)InVal1.s.uExponent - (uint32_t)InVal2.s.uExponent - (uint32_t)64 <= (uint32_t)512)
4216 cTargetRangeInputs += fTargetRange = true;
4217 else if (cTargetRangeInputs < cMinTargetRangeInputs && iTest < cTests)
4218 if (g_aFpuBinaryR80[iFn].uExtra == kFpuBinaryHint_fprem)
4219 { /* The aim is two values with an exponent difference between 64 and 640 so we can do the whole sequence. */
4220 InVal2.s.uExponent = RTRandU32Ex(1, RTFLOAT80U_EXP_MAX - 66);
4221 InVal1.s.uExponent = RTRandU32Ex(InVal2.s.uExponent + 64, RT_MIN(InVal2.s.uExponent + 512, RTFLOAT80U_EXP_MAX - 1));
4222 cTargetRangeInputs += fTargetRange = true;
4223 }
4224 }
4225 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
4226 {
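                /* Running low on iterations without enough normal input pairs: redo this index with
                   fresh random values. */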
4227 iTest -= 1;
4228 continue;
4229 }
4230
4231 uint16_t const fFcwExtra = 0;
4232 uint16_t const fFcw = RandFcw();
4233 State.FSW = RandFsw();
4234
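            /* For each rounding/precision combination: run once with all exceptions masked and once
               with all of them unmasked; if anything was raised, re-run with exactly those exceptions
               masked, then with any newly raised ones added, and finally with each raised exception
               unmasked individually. */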
4235 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
4236 for (uint16_t iPrecision = 0; iPrecision < 4; iPrecision++)
4237 {
4238 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_PC_MASK | X86_FCW_MASK_ALL))
4239 | (iRounding << X86_FCW_RC_SHIFT)
4240 | (iPrecision << X86_FCW_PC_SHIFT)
4241 | X86_FCW_MASK_ALL;
4242 IEMFPURESULT ResM = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4243 pfn(&State, &ResM, &InVal1, &InVal2);
4244 FPU_BINARY_R80_TEST_T const TestM
4245 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResM.FSW, InVal1, InVal2, ResM.r80Result };
4246 GenerateBinaryWrite(&BinOut, &TestM, sizeof(TestM));
4247
4248 State.FCW = State.FCW & ~X86_FCW_MASK_ALL;
4249 IEMFPURESULT ResU = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4250 pfn(&State, &ResU, &InVal1, &InVal2);
4251 FPU_BINARY_R80_TEST_T const TestU
4252 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResU.FSW, InVal1, InVal2, ResU.r80Result };
4253 GenerateBinaryWrite(&BinOut, &TestU, sizeof(TestU));
4254
4255 uint16_t fXcpt = (ResM.FSW | ResU.FSW) & X86_FSW_XCPT_MASK & ~X86_FSW_SF;
4256 if (fXcpt)
4257 {
4258 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | fXcpt;
4259 IEMFPURESULT Res1 = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4260 pfn(&State, &Res1, &InVal1, &InVal2);
4261 FPU_BINARY_R80_TEST_T const Test1
4262 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res1.FSW, InVal1, InVal2, Res1.r80Result };
4263 GenerateBinaryWrite(&BinOut, &Test1, sizeof(Test1));
4264
4265 if (((Res1.FSW & X86_FSW_XCPT_MASK) & fXcpt) != (Res1.FSW & X86_FSW_XCPT_MASK))
4266 {
4267 fXcpt |= Res1.FSW & X86_FSW_XCPT_MASK;
4268 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | fXcpt;
4269 IEMFPURESULT Res2 = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4270 pfn(&State, &Res2, &InVal1, &InVal2);
4271 FPU_BINARY_R80_TEST_T const Test2
4272 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res2.FSW, InVal1, InVal2, Res2.r80Result };
4273 GenerateBinaryWrite(&BinOut, &Test2, sizeof(Test2));
4274 }
4275 if (!RT_IS_POWER_OF_TWO(fXcpt))
4276 for (uint16_t fUnmasked = 1; fUnmasked <= X86_FCW_PM; fUnmasked <<= 1)
4277 if (fUnmasked & fXcpt)
4278 {
4279 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | (fXcpt & ~fUnmasked);
4280 IEMFPURESULT Res3 = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4281 pfn(&State, &Res3, &InVal1, &InVal2);
4282 FPU_BINARY_R80_TEST_T const Test3
4283 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res3.FSW, InVal1, InVal2, Res3.r80Result };
4284 GenerateBinaryWrite(&BinOut, &Test3, sizeof(Test3));
4285 }
4286 }
4287
4288 /* If the values are in range and caused no exceptions, do the whole series of
4289 partial remainders till we get the non-partial one or run into an exception. */
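/* (FPREM/FPREM1 reduce the exponent difference by at most 63 per invocation and keep
   FSW.C2 set while the reduction is incomplete, hence the loop below checks C2 and feeds
   the previous partial result back in as the dividend.) */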
4290 if (fTargetRange && fXcpt == 0 && g_aFpuBinaryR80[iFn].uExtra == kFpuBinaryHint_fprem)
4291 {
4292 IEMFPURESULT ResPrev = ResM;
4293 for (unsigned i = 0; i < 32 && (ResPrev.FSW & (X86_FSW_C2 | X86_FSW_XCPT_MASK)) == X86_FSW_C2; i++)
4294 {
4295 State.FCW = State.FCW | X86_FCW_MASK_ALL;
4296 State.FSW = ResPrev.FSW;
4297 IEMFPURESULT ResSeq = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4298 pfn(&State, &ResSeq, &ResPrev.r80Result, &InVal2);
4299 FPU_BINARY_R80_TEST_T const TestSeq
4300 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResSeq.FSW, ResPrev.r80Result, InVal2, ResSeq.r80Result };
4301 GenerateBinaryWrite(&BinOut, &TestSeq, sizeof(TestSeq));
4302 ResPrev = ResSeq;
4303 }
4304 }
4305 }
4306 }
4307 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
4308 }
4309 return RTEXITCODE_SUCCESS;
4310}
4311DUMP_ALL_FN(FpuBinaryR80, g_aFpuBinaryR80)
4312#endif
4313
4314
4315static void FpuBinaryR80Test(void)
4316{
4317 X86FXSTATE State;
4318 RT_ZERO(State);
4319 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuBinaryR80); iFn++)
4320 {
4321 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuBinaryR80[iFn]))
4322 continue;
4323
4324 FPU_BINARY_R80_TEST_T const * const paTests = g_aFpuBinaryR80[iFn].paTests;
4325 uint32_t const cTests = g_aFpuBinaryR80[iFn].cTests;
4326 PFNIEMAIMPLFPUR80 pfn = g_aFpuBinaryR80[iFn].pfn;
4327 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuBinaryR80[iFn]);
4328 if (!cTests) RTTestSkipped(g_hTest, "no tests");
4329 for (uint32_t iVar = 0; iVar < cVars; iVar++)
4330 {
4331 for (uint32_t iTest = 0; iTest < cTests; iTest++)
4332 {
4333 RTFLOAT80U const InVal1 = paTests[iTest].InVal1;
4334 RTFLOAT80U const InVal2 = paTests[iTest].InVal2;
4335 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4336 State.FCW = paTests[iTest].fFcw;
4337 State.FSW = paTests[iTest].fFswIn;
4338 pfn(&State, &Res, &InVal1, &InVal2);
4339 if ( Res.FSW != paTests[iTest].fFswOut
4340 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].OutVal))
4341 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in1=%s in2=%s\n"
4342 "%s -> fsw=%#06x %s\n"
4343 "%s expected %#06x %s%s%s (%s)\n",
4344 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
4345 FormatR80(&paTests[iTest].InVal1), FormatR80(&paTests[iTest].InVal2),
4346 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result),
4347 iVar ? " " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].OutVal),
4348 FswDiff(Res.FSW, paTests[iTest].fFswOut),
4349 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].OutVal) ? " - val" : "",
4350 FormatFcw(paTests[iTest].fFcw) );
4351 }
4352 pfn = g_aFpuBinaryR80[iFn].pfnNative;
4353 }
4354
4355 FREE_DECOMPRESSED_TESTS(g_aFpuBinaryR80[iFn]);
4356 }
4357}
4358
4359
4360/*
4361 * Binary FPU operations on one 80-bit floating point value and one 64-bit, 32-bit or 16-bit one.
4362 */
4363#define int64_t_IS_NORMAL(a) 1
4364#define int32_t_IS_NORMAL(a) 1
4365#define int16_t_IS_NORMAL(a) 1
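/* The three trivial macros above exist so the generic generator below can write
   a_Type2##_IS_NORMAL(&InVal2) regardless of whether the second operand is a floating
   point type or an integer; for the integer types the check degenerates to constant true.
   A minimal sketch of the two expansions (illustration only, the helpers are made up and
   not part of the testcase): */
#if 0
static bool IsNormalSketchR64(RTFLOAT64U const *pVal) { return RTFLOAT64U_IS_NORMAL(pVal); } /* real classification */
static bool IsNormalSketchI32(int32_t const *pVal)    { return int32_t_IS_NORMAL(pVal); }    /* expands to the constant 1 */
#endif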
4366
4367#ifdef TSTIEMAIMPL_WITH_GENERATOR
4368static struct { RTFLOAT80U Val1; RTFLOAT64U Val2; } const s_aFpuBinaryR64Specials[] =
4369{
4370 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS),
4371 RTFLOAT64U_INIT_C(0, 0xfeeeeddddcccc, RTFLOAT64U_EXP_BIAS) }, /* whatever */
4372};
4373static struct { RTFLOAT80U Val1; RTFLOAT32U Val2; } const s_aFpuBinaryR32Specials[] =
4374{
4375 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS),
4376 RTFLOAT32U_INIT_C(0, 0x7fffee, RTFLOAT32U_EXP_BIAS) }, /* whatever */
4377};
4378static struct { RTFLOAT80U Val1; int32_t Val2; } const s_aFpuBinaryI32Specials[] =
4379{
4380 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS), INT32_MAX }, /* whatever */
4381};
4382static struct { RTFLOAT80U Val1; int16_t Val2; } const s_aFpuBinaryI16Specials[] =
4383{
4384 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS), INT16_MAX }, /* whatever */
4385};
4386
4387# define GEN_FPU_BINARY_SMALL(a_fIntType, a_cBits, a_LoBits, a_UpBits, a_Type2, a_aSubTests, a_TestType) \
4388static RTEXITCODE FpuBinary ## a_UpBits ## Generate(uint32_t cTests, const char * const *papszNameFmts) \
4389{ \
4390 cTests = RT_MAX(160, cTests); /* there are 144 standard input variations for r80 by r80 */ \
4391 \
4392 X86FXSTATE State; \
4393 RT_ZERO(State); \
4394 uint32_t cMinNormalPairs = (cTests - 144) / 4; \
4395 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
4396 { \
4397 IEMBINARYOUTPUT BinOut; \
4398 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
4399 uint32_t cNormalInputPairs = 0; \
4400 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aFpuBinary ## a_UpBits ## Specials); iTest += 1) \
4401 { \
4402 RTFLOAT80U const InVal1 = iTest < cTests ? RandR80Src1(iTest, a_cBits, a_fIntType) \
4403 : s_aFpuBinary ## a_UpBits ## Specials[iTest - cTests].Val1; \
4404 a_Type2 const InVal2 = iTest < cTests ? Rand ## a_UpBits ## Src2(iTest) \
4405 : s_aFpuBinary ## a_UpBits ## Specials[iTest - cTests].Val2; \
4406 if (RTFLOAT80U_IS_NORMAL(&InVal1) && a_Type2 ## _IS_NORMAL(&InVal2)) \
4407 cNormalInputPairs++; \
4408 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests) \
4409 { \
4410 iTest -= 1; \
4411 continue; \
4412 } \
4413 \
4414 uint16_t const fFcw = RandFcw(); \
4415 State.FSW = RandFsw(); \
4416 \
4417 for (uint16_t iRounding = 0; iRounding < 4; iRounding++) \
4418 { \
4419 for (uint16_t iPrecision = 0; iPrecision < 4; iPrecision++) \
4420 { \
4421 for (uint16_t iMask = 0; iMask <= X86_FCW_MASK_ALL; iMask += X86_FCW_MASK_ALL) \
4422 { \
4423 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_PC_MASK | X86_FCW_MASK_ALL)) \
4424 | (iRounding << X86_FCW_RC_SHIFT) \
4425 | (iPrecision << X86_FCW_PC_SHIFT) \
4426 | iMask; \
4427 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
4428 a_aSubTests[iFn].pfn(&State, &Res, &InVal1, &InVal2); \
4429 a_TestType const Test = { State.FCW, State.FSW, Res.FSW, InVal1, InVal2, Res.r80Result }; \
4430 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
4431 } \
4432 } \
4433 } \
4434 } \
4435 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
4436 } \
4437 return RTEXITCODE_SUCCESS; \
4438} \
4439DUMP_ALL_FN(FpuBinary ## a_UpBits, a_aSubTests)
4440#else
4441# define GEN_FPU_BINARY_SMALL(a_fIntType, a_cBits, a_LoBits, a_UpBits, a_Type2, a_aSubTests, a_TestType)
4442#endif
4443
4444#define TEST_FPU_BINARY_SMALL(a_fIntType, a_cBits, a_LoBits, a_UpBits, a_I, a_Type2, a_SubTestType, a_aSubTests, a_TestType) \
4445TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLFPU ## a_UpBits); \
4446\
4447static a_SubTestType a_aSubTests[] = \
4448{ \
4449 ENTRY_BIN(RT_CONCAT4(f, a_I, add_r80_by_, a_LoBits)), \
4450 ENTRY_BIN(RT_CONCAT4(f, a_I, mul_r80_by_, a_LoBits)), \
4451 ENTRY_BIN(RT_CONCAT4(f, a_I, sub_r80_by_, a_LoBits)), \
4452 ENTRY_BIN(RT_CONCAT4(f, a_I, subr_r80_by_, a_LoBits)), \
4453 ENTRY_BIN(RT_CONCAT4(f, a_I, div_r80_by_, a_LoBits)), \
4454 ENTRY_BIN(RT_CONCAT4(f, a_I, divr_r80_by_, a_LoBits)), \
4455}; \
4456\
4457GEN_FPU_BINARY_SMALL(a_fIntType, a_cBits, a_LoBits, a_UpBits, a_Type2, a_aSubTests, a_TestType) \
4458\
4459static void FpuBinary ## a_UpBits ## Test(void) \
4460{ \
4461 X86FXSTATE State; \
4462 RT_ZERO(State); \
4463 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
4464 { \
4465 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
4466 continue; \
4467 \
4468 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
4469 uint32_t const cTests = a_aSubTests[iFn].cTests; \
4470 PFNIEMAIMPLFPU ## a_UpBits pfn = a_aSubTests[iFn].pfn; \
4471 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
4472 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
4473 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
4474 { \
4475 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
4476 { \
4477 RTFLOAT80U const InVal1 = paTests[iTest].InVal1; \
4478 a_Type2 const InVal2 = paTests[iTest].InVal2; \
4479 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 }; \
4480 State.FCW = paTests[iTest].fFcw; \
4481 State.FSW = paTests[iTest].fFswIn; \
4482 pfn(&State, &Res, &InVal1, &InVal2); \
4483 if ( Res.FSW != paTests[iTest].fFswOut \
4484 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].OutVal)) \
4485 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in1=%s in2=%s\n" \
4486 "%s -> fsw=%#06x %s\n" \
4487 "%s expected %#06x %s%s%s (%s)\n", \
4488 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
4489 FormatR80(&paTests[iTest].InVal1), Format ## a_UpBits(&paTests[iTest].InVal2), \
4490 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result), \
4491 iVar ? " " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].OutVal), \
4492 FswDiff(Res.FSW, paTests[iTest].fFswOut), \
4493 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].OutVal) ? " - val" : "", \
4494 FormatFcw(paTests[iTest].fFcw) ); \
4495 } \
4496 pfn = a_aSubTests[iFn].pfnNative; \
4497 } \
4498 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
4499 } \
4500}
4501
4502TEST_FPU_BINARY_SMALL(0, 64, r64, R64, RT_NOTHING, RTFLOAT64U, FPU_BINARY_R64_T, g_aFpuBinaryR64, FPU_BINARY_R64_TEST_T)
4503TEST_FPU_BINARY_SMALL(0, 32, r32, R32, RT_NOTHING, RTFLOAT32U, FPU_BINARY_R32_T, g_aFpuBinaryR32, FPU_BINARY_R32_TEST_T)
4504TEST_FPU_BINARY_SMALL(1, 32, i32, I32, i, int32_t, FPU_BINARY_I32_T, g_aFpuBinaryI32, FPU_BINARY_I32_TEST_T)
4505TEST_FPU_BINARY_SMALL(1, 16, i16, I16, i, int16_t, FPU_BINARY_I16_T, g_aFpuBinaryI16, FPU_BINARY_I16_TEST_T)
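/* Each instantiation above expands the RT_CONCAT4() entries into the six arithmetic forms
   for that operand type, e.g. the R64 line yields fadd_r80_by_r64, fmul_r80_by_r64,
   fsub_r80_by_r64, fsubr_r80_by_r64, fdiv_r80_by_r64 and fdivr_r80_by_r64, while the I16
   line yields the fiadd_r80_by_i16 thru fidivr_r80_by_i16 family. */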
4506
4507
4508/*
4509 * Binary operations on 80-, 64- and 32-bit floating point only affecting FSW.
4510 */
4511#ifdef TSTIEMAIMPL_WITH_GENERATOR
4512static struct { RTFLOAT80U Val1, Val2; } const s_aFpuBinaryFswR80Specials[] =
4513{
4514 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS),
4515 RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS) }, /* whatever */
4516};
4517static struct { RTFLOAT80U Val1; RTFLOAT64U Val2; } const s_aFpuBinaryFswR64Specials[] =
4518{
4519 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS),
4520 RTFLOAT64U_INIT_C(0, 0xfeeeeddddcccc, RTFLOAT64U_EXP_BIAS) }, /* whatever */
4521};
4522static struct { RTFLOAT80U Val1; RTFLOAT32U Val2; } const s_aFpuBinaryFswR32Specials[] =
4523{
4524 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS),
4525 RTFLOAT32U_INIT_C(0, 0x7fffee, RTFLOAT32U_EXP_BIAS) }, /* whatever */
4526};
4527static struct { RTFLOAT80U Val1; int32_t Val2; } const s_aFpuBinaryFswI32Specials[] =
4528{
4529 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS), INT32_MAX }, /* whatever */
4530};
4531static struct { RTFLOAT80U Val1; int16_t Val2; } const s_aFpuBinaryFswI16Specials[] =
4532{
4533 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS), INT16_MAX }, /* whatever */
4534};
4535
4536# define GEN_FPU_BINARY_FSW(a_fIntType, a_cBits, a_UpBits, a_Type2, a_aSubTests, a_TestType) \
4537static RTEXITCODE FpuBinaryFsw ## a_UpBits ## Generate(uint32_t cTests, const char * const *papszNameFmts) \
4538{ \
4539 cTests = RT_MAX(160, cTests); /* there are 144 standard input variations for r80 by r80 */ \
4540 \
4541 X86FXSTATE State; \
4542 RT_ZERO(State); \
4543 uint32_t cMinNormalPairs = (cTests - 144) / 4; \
4544 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
4545 { \
4546 IEMBINARYOUTPUT BinOut; \
4547 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, a_aSubTests[iFn]), RTEXITCODE_FAILURE); \
4548 uint32_t cNormalInputPairs = 0; \
4549 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aFpuBinaryFsw ## a_UpBits ## Specials); iTest += 1) \
4550 { \
4551 RTFLOAT80U const InVal1 = iTest < cTests ? RandR80Src1(iTest, a_cBits, a_fIntType) \
4552 : s_aFpuBinaryFsw ## a_UpBits ## Specials[iTest - cTests].Val1; \
4553 a_Type2 const InVal2 = iTest < cTests ? Rand ## a_UpBits ## Src2(iTest) \
4554 : s_aFpuBinaryFsw ## a_UpBits ## Specials[iTest - cTests].Val2; \
4555 if (RTFLOAT80U_IS_NORMAL(&InVal1) && a_Type2 ## _IS_NORMAL(&InVal2)) \
4556 cNormalInputPairs++; \
4557 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests) \
4558 { \
4559 iTest -= 1; \
4560 continue; \
4561 } \
4562 \
4563 uint16_t const fFcw = RandFcw(); \
4564 State.FSW = RandFsw(); \
4565 \
4566 /* Guess these aren't affected by precision or rounding, so just flip the exception mask. */ \
4567 for (uint16_t iMask = 0; iMask <= X86_FCW_MASK_ALL; iMask += X86_FCW_MASK_ALL) \
4568 { \
4569 State.FCW = (fFcw & ~(X86_FCW_MASK_ALL)) | iMask; \
4570 uint16_t fFswOut = 0; \
4571 a_aSubTests[iFn].pfn(&State, &fFswOut, &InVal1, &InVal2); \
4572 a_TestType const Test = { State.FCW, State.FSW, fFswOut, InVal1, InVal2 }; \
4573 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test)); \
4574 } \
4575 } \
4576 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE); \
4577 } \
4578 return RTEXITCODE_SUCCESS; \
4579} \
4580DUMP_ALL_FN(FpuBinaryFsw ## a_UpBits, a_aSubTests)
4581#else
4582# define GEN_FPU_BINARY_FSW(a_fIntType, a_cBits, a_UpBits, a_Type2, a_aSubTests, a_TestType)
4583#endif
4584
4585#define TEST_FPU_BINARY_FSW(a_fIntType, a_cBits, a_UpBits, a_Type2, a_SubTestType, a_aSubTests, a_TestType, ...) \
4586TYPEDEF_SUBTEST_TYPE(a_SubTestType, a_TestType, PFNIEMAIMPLFPU ## a_UpBits ## FSW); \
4587\
4588static a_SubTestType a_aSubTests[] = \
4589{ \
4590 __VA_ARGS__ \
4591}; \
4592\
4593GEN_FPU_BINARY_FSW(a_fIntType, a_cBits, a_UpBits, a_Type2, a_aSubTests, a_TestType) \
4594\
4595static void FpuBinaryFsw ## a_UpBits ## Test(void) \
4596{ \
4597 X86FXSTATE State; \
4598 RT_ZERO(State); \
4599 for (size_t iFn = 0; iFn < RT_ELEMENTS(a_aSubTests); iFn++) \
4600 { \
4601 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(a_aSubTests[iFn])) \
4602 continue; \
4603 \
4604 a_TestType const * const paTests = a_aSubTests[iFn].paTests; \
4605 uint32_t const cTests = a_aSubTests[iFn].cTests; \
4606 PFNIEMAIMPLFPU ## a_UpBits ## FSW pfn = a_aSubTests[iFn].pfn; \
4607 uint32_t const cVars = COUNT_VARIATIONS(a_aSubTests[iFn]); \
4608 if (!cTests) RTTestSkipped(g_hTest, "no tests"); \
4609 for (uint32_t iVar = 0; iVar < cVars; iVar++) \
4610 { \
4611 for (uint32_t iTest = 0; iTest < cTests; iTest++) \
4612 { \
4613 uint16_t fFswOut = 0; \
4614 RTFLOAT80U const InVal1 = paTests[iTest].InVal1; \
4615 a_Type2 const InVal2 = paTests[iTest].InVal2; \
4616 State.FCW = paTests[iTest].fFcw; \
4617 State.FSW = paTests[iTest].fFswIn; \
4618 pfn(&State, &fFswOut, &InVal1, &InVal2); \
4619 if (fFswOut != paTests[iTest].fFswOut) \
4620 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in1=%s in2=%s\n" \
4621 "%s -> fsw=%#06x\n" \
4622 "%s expected %#06x %s (%s)\n", \
4623 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn, \
4624 FormatR80(&paTests[iTest].InVal1), Format ## a_UpBits(&paTests[iTest].InVal2), \
4625 iVar ? " " : "", fFswOut, \
4626 iVar ? " " : "", paTests[iTest].fFswOut, \
4627 FswDiff(fFswOut, paTests[iTest].fFswOut), FormatFcw(paTests[iTest].fFcw) ); \
4628 } \
4629 pfn = a_aSubTests[iFn].pfnNative; \
4630 } \
4631 FREE_DECOMPRESSED_TESTS(a_aSubTests[iFn]); \
4632 } \
4633}
4634
4635TEST_FPU_BINARY_FSW(0, 80, R80, RTFLOAT80U, FPU_BINARY_FSW_R80_T, g_aFpuBinaryFswR80, FPU_BINARY_R80_TEST_T, ENTRY_BIN(fcom_r80_by_r80), ENTRY_BIN(fucom_r80_by_r80))
4636TEST_FPU_BINARY_FSW(0, 64, R64, RTFLOAT64U, FPU_BINARY_FSW_R64_T, g_aFpuBinaryFswR64, FPU_BINARY_R64_TEST_T, ENTRY_BIN(fcom_r80_by_r64))
4637TEST_FPU_BINARY_FSW(0, 32, R32, RTFLOAT32U, FPU_BINARY_FSW_R32_T, g_aFpuBinaryFswR32, FPU_BINARY_R32_TEST_T, ENTRY_BIN(fcom_r80_by_r32))
4638TEST_FPU_BINARY_FSW(1, 32, I32, int32_t, FPU_BINARY_FSW_I32_T, g_aFpuBinaryFswI32, FPU_BINARY_I32_TEST_T, ENTRY_BIN(ficom_r80_by_i32))
4639TEST_FPU_BINARY_FSW(1, 16, I16, int16_t, FPU_BINARY_FSW_I16_T, g_aFpuBinaryFswI16, FPU_BINARY_I16_TEST_T, ENTRY_BIN(ficom_r80_by_i16))
4640
4641
4642/*
4643 * Binary operations on 80-bit floating point values that affect only EFLAGS and possibly FSW.
4644 */
4645TYPEDEF_SUBTEST_TYPE(FPU_BINARY_EFL_R80_T, FPU_BINARY_EFL_R80_TEST_T, PFNIEMAIMPLFPUR80EFL);
4646
4647static FPU_BINARY_EFL_R80_T g_aFpuBinaryEflR80[] =
4648{
4649 ENTRY_BIN(fcomi_r80_by_r80),
4650 ENTRY_BIN(fucomi_r80_by_r80),
4651};
4652
4653#ifdef TSTIEMAIMPL_WITH_GENERATOR
4654static struct { RTFLOAT80U Val1, Val2; } const s_aFpuBinaryEflR80Specials[] =
4655{
4656 { RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS),
4657 RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS) }, /* whatever */
4658};
4659
4660static RTEXITCODE FpuBinaryEflR80Generate(uint32_t cTests, const char * const *papszNameFmts)
4661{
4662 cTests = RT_MAX(160, cTests); /* there are 144 standard input variations */
4663
4664 X86FXSTATE State;
4665 RT_ZERO(State);
4666 uint32_t cMinNormalPairs = (cTests - 144) / 4;
4667 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuBinaryEflR80); iFn++)
4668 {
4669 IEMBINARYOUTPUT BinOut;
4670 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuBinaryEflR80[iFn]), RTEXITCODE_FAILURE);
4671 uint32_t cNormalInputPairs = 0;
4672 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aFpuBinaryEflR80Specials); iTest += 1)
4673 {
4674 RTFLOAT80U const InVal1 = iTest < cTests ? RandR80Src1(iTest) : s_aFpuBinaryEflR80Specials[iTest - cTests].Val1;
4675 RTFLOAT80U const InVal2 = iTest < cTests ? RandR80Src2(iTest) : s_aFpuBinaryEflR80Specials[iTest - cTests].Val2;
4676 if (RTFLOAT80U_IS_NORMAL(&InVal1) && RTFLOAT80U_IS_NORMAL(&InVal2))
4677 cNormalInputPairs++;
4678 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
4679 {
4680 iTest -= 1;
4681 continue;
4682 }
4683
4684 uint16_t const fFcw = RandFcw();
4685 State.FSW = RandFsw();
4686
4687 /* Guess these aren't affected by precision or rounding, so just flip the exception mask. */
4688 for (uint16_t iMask = 0; iMask <= X86_FCW_MASK_ALL; iMask += X86_FCW_MASK_ALL)
4689 {
4690 State.FCW = (fFcw & ~(X86_FCW_MASK_ALL)) | iMask;
4691 uint16_t uFswOut = 0;
4692 uint32_t fEflOut = g_aFpuBinaryEflR80[iFn].pfn(&State, &uFswOut, &InVal1, &InVal2);
4693 FPU_BINARY_EFL_R80_TEST_T const Test = { State.FCW, State.FSW, uFswOut, InVal1, InVal2, fEflOut, };
4694 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
4695 }
4696 }
4697 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
4698 }
4699 return RTEXITCODE_SUCCESS;
4700}
4701DUMP_ALL_FN(FpuBinaryEflR80, g_aFpuBinaryEflR80)
4702#endif /*TSTIEMAIMPL_WITH_GENERATOR*/
4703
4704static void FpuBinaryEflR80Test(void)
4705{
4706 X86FXSTATE State;
4707 RT_ZERO(State);
4708 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuBinaryEflR80); iFn++)
4709 {
4710 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuBinaryEflR80[iFn]))
4711 continue;
4712
4713 FPU_BINARY_EFL_R80_TEST_T const * const paTests = g_aFpuBinaryEflR80[iFn].paTests;
4714 uint32_t const cTests = g_aFpuBinaryEflR80[iFn].cTests;
4715 PFNIEMAIMPLFPUR80EFL pfn = g_aFpuBinaryEflR80[iFn].pfn;
4716 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuBinaryEflR80[iFn]);
4717 if (!cTests) RTTestSkipped(g_hTest, "no tests");
4718 for (uint32_t iVar = 0; iVar < cVars; iVar++)
4719 {
4720 for (uint32_t iTest = 0; iTest < cTests; iTest++)
4721 {
4722 RTFLOAT80U const InVal1 = paTests[iTest].InVal1;
4723 RTFLOAT80U const InVal2 = paTests[iTest].InVal2;
4724 State.FCW = paTests[iTest].fFcw;
4725 State.FSW = paTests[iTest].fFswIn;
4726 uint16_t uFswOut = 0;
4727 uint32_t fEflOut = pfn(&State, &uFswOut, &InVal1, &InVal2);
4728 if ( uFswOut != paTests[iTest].fFswOut
4729 || fEflOut != paTests[iTest].fEflOut)
4730 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in1=%s in2=%s\n"
4731 "%s -> fsw=%#06x efl=%#08x\n"
4732 "%s expected %#06x %#08x %s%s (%s)\n",
4733 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
4734 FormatR80(&paTests[iTest].InVal1), FormatR80(&paTests[iTest].InVal2),
4735 iVar ? " " : "", uFswOut, fEflOut,
4736 iVar ? " " : "", paTests[iTest].fFswOut, paTests[iTest].fEflOut,
4737 FswDiff(uFswOut, paTests[iTest].fFswOut), EFlagsDiff(fEflOut, paTests[iTest].fEflOut),
4738 FormatFcw(paTests[iTest].fFcw));
4739 }
4740 pfn = g_aFpuBinaryEflR80[iFn].pfnNative;
4741 }
4742
4743 FREE_DECOMPRESSED_TESTS(g_aFpuBinaryEflR80[iFn]);
4744 }
4745}
4746
4747
4748/*********************************************************************************************************************************
4749* x87 FPU Unary Operations *
4750*********************************************************************************************************************************/
4751
4752/*
4753 * Unary FPU operations on one 80-bit floating point value.
4754 *
4755 * Note! The FCW reserved bit 7 is used to indicate whether a test may produce
4756 * a rounding error or not.
4757 */
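/* A minimal sketch of that bit 7 convention (illustration only, the helpers are made up;
   the real encoding/decoding is done inline by the generator and test loop below): */
#if 0
static uint16_t FpuUnarySketchStoreFcw(uint16_t fFcw, bool fMayHaveRndErr)
{
    return (uint16_t)(fFcw | (fMayHaveRndErr ? 0x80 : 0));    /* bit 7 is reserved, so the test data can borrow it */
}
static uint16_t FpuUnarySketchLoadFcw(uint16_t fFcwStored, bool *pfRndErrOk)
{
    *pfRndErrOk = RT_BOOL(fFcwStored & 0x80);                 /* allow a one-ulp / C1 difference when set */
    return (uint16_t)(fFcwStored & ~(uint16_t)0x80);          /* this is what actually goes into State.FCW */
}
#endif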
4758TYPEDEF_SUBTEST_TYPE(FPU_UNARY_R80_T, FPU_UNARY_R80_TEST_T, PFNIEMAIMPLFPUR80UNARY);
4759
4760enum { kUnary_Accurate = 0, kUnary_Accurate_Trigonometry /*probably not accurate, but need impl to know*/, kUnary_Rounding_F2xm1 };
4761static FPU_UNARY_R80_T g_aFpuUnaryR80[] =
4762{
4763 ENTRY_BIN_EX( fabs_r80, kUnary_Accurate),
4764 ENTRY_BIN_EX( fchs_r80, kUnary_Accurate),
4765 ENTRY_BIN_AMD_EX( f2xm1_r80, 0, kUnary_Accurate), // C1 differs for -1m0x3fb263cc2c331e15^-2654 (different ln2 constant?)
4766 ENTRY_BIN_INTEL_EX(f2xm1_r80, 0, kUnary_Rounding_F2xm1),
4767 ENTRY_BIN_EX( fsqrt_r80, kUnary_Accurate),
4768 ENTRY_BIN_EX( frndint_r80, kUnary_Accurate),
4769 ENTRY_BIN_AMD_EX( fsin_r80, 0, kUnary_Accurate_Trigonometry), // value & C1 differences for pseudo denormals and others (e.g. -1m0x2b1e5683cbca5725^-3485)
4770 ENTRY_BIN_INTEL_EX(fsin_r80, 0, kUnary_Accurate_Trigonometry),
4771 ENTRY_BIN_AMD_EX( fcos_r80, 0, kUnary_Accurate_Trigonometry), // value & C1 differences
4772 ENTRY_BIN_INTEL_EX(fcos_r80, 0, kUnary_Accurate_Trigonometry),
4773};
4774
4775#ifdef TSTIEMAIMPL_WITH_GENERATOR
4776
4777static bool FpuUnaryR80MayHaveRoundingError(PCRTFLOAT80U pr80Val, int enmKind)
4778{
4779 if ( enmKind == kUnary_Rounding_F2xm1
4780 && RTFLOAT80U_IS_NORMAL(pr80Val)
4781 && pr80Val->s.uExponent < RTFLOAT80U_EXP_BIAS
4782 && pr80Val->s.uExponent >= RTFLOAT80U_EXP_BIAS - 69)
4783 return true;
4784 return false;
4785}
4786
4787DUMP_ALL_FN(FpuUnaryR80, g_aFpuUnaryR80)
4788static RTEXITCODE FpuUnaryR80Generate(uint32_t cTests, const char * const *papszNameFmts)
4789{
4790 static RTFLOAT80U const s_aSpecials[] =
4791 {
4792 RTFLOAT80U_INIT_C(0, 0x8000000000000000, RTFLOAT80U_EXP_BIAS - 1), /* 0.5 (for f2xm1) */
4793 RTFLOAT80U_INIT_C(1, 0x8000000000000000, RTFLOAT80U_EXP_BIAS - 1), /* -0.5 (for f2xm1) */
4794 RTFLOAT80U_INIT_C(0, 0x8000000000000000, RTFLOAT80U_EXP_BIAS), /* 1.0 (for f2xm1) */
4795 RTFLOAT80U_INIT_C(1, 0x8000000000000000, RTFLOAT80U_EXP_BIAS), /* -1.0 (for f2xm1) */
4796 RTFLOAT80U_INIT_C(0, 0x8000000000000000, 0), /* +1.0^-16382 */
4797 RTFLOAT80U_INIT_C(1, 0x8000000000000000, 0), /* -1.0^-16382 */
4798 RTFLOAT80U_INIT_C(0, 0xc000000000000000, 0), /* +1.1^-16382 */
4799 RTFLOAT80U_INIT_C(1, 0xc000000000000000, 0), /* -1.1^-16382 */
4800 RTFLOAT80U_INIT_C(0, 0xc000100000000000, 0), /* +1.1xxx1^-16382 */
4801 RTFLOAT80U_INIT_C(1, 0xc000100000000000, 0), /* -1.1xxx1^-16382 */
4802 };
4803 X86FXSTATE State;
4804 RT_ZERO(State);
4805 uint32_t cMinNormals = cTests / 4;
4806 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuUnaryR80); iFn++)
4807 {
4808 PFNIEMAIMPLFPUR80UNARY const pfn = g_aFpuUnaryR80[iFn].pfnNative ? g_aFpuUnaryR80[iFn].pfnNative : g_aFpuUnaryR80[iFn].pfn;
4809 if ( g_aFpuUnaryR80[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
4810 && g_aFpuUnaryR80[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
4811 continue;
4812
4813 IEMBINARYOUTPUT BinOut;
4814 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuUnaryR80[iFn]), RTEXITCODE_FAILURE);
4815 uint32_t cNormalInputs = 0;
4816 uint32_t cTargetRangeInputs = 0;
4817 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
4818 {
4819 RTFLOAT80U InVal = iTest < cTests ? RandR80Src(iTest) : s_aSpecials[iTest - cTests];
4820 if (RTFLOAT80U_IS_NORMAL(&InVal))
4821 {
4822 if (g_aFpuUnaryR80[iFn].uExtra == kUnary_Rounding_F2xm1)
4823 {
4824 unsigned uTargetExp = g_aFpuUnaryR80[iFn].uExtra == kUnary_Rounding_F2xm1
4825 ? RTFLOAT80U_EXP_BIAS /* 2^0..2^-69 */ : RTFLOAT80U_EXP_BIAS + 63 + 1 /* 2^64..2^-64 */;
4826 unsigned cTargetExp = g_aFpuUnaryR80[iFn].uExtra == kUnary_Rounding_F2xm1 ? 69 : 63*2 + 2;
4827 if (InVal.s.uExponent <= uTargetExp && InVal.s.uExponent >= uTargetExp - cTargetExp)
4828 cTargetRangeInputs++;
4829 else if (cTargetRangeInputs < cMinNormals / 2 && iTest + cMinNormals / 2 >= cTests && iTest < cTests)
4830 {
4831 InVal.s.uExponent = RTRandU32Ex(uTargetExp - cTargetExp, uTargetExp);
4832 cTargetRangeInputs++;
4833 }
4834 }
4835 cNormalInputs++;
4836 }
4837 else if (cNormalInputs < cMinNormals && iTest + cMinNormals >= cTests && iTest < cTests)
4838 {
4839 iTest -= 1;
4840 continue;
4841 }
4842
4843 uint16_t const fFcwExtra = FpuUnaryR80MayHaveRoundingError(&InVal, g_aFpuUnaryR80[iFn].uExtra) ? 0x80 : 0;
4844 uint16_t const fFcw = RandFcw();
4845 State.FSW = RandFsw();
4846
4847 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
4848 for (uint16_t iPrecision = 0; iPrecision < 4; iPrecision++)
4849 {
4850 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_PC_MASK | X86_FCW_MASK_ALL))
4851 | (iRounding << X86_FCW_RC_SHIFT)
4852 | (iPrecision << X86_FCW_PC_SHIFT)
4853 | X86_FCW_MASK_ALL;
4854 IEMFPURESULT ResM = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4855 pfn(&State, &ResM, &InVal);
4856 FPU_UNARY_R80_TEST_T const TestM
4857 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResM.FSW, InVal, ResM.r80Result };
4858 GenerateBinaryWrite(&BinOut, &TestM, sizeof(TestM));
4859
4860 State.FCW = State.FCW & ~X86_FCW_MASK_ALL;
4861 IEMFPURESULT ResU = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4862 pfn(&State, &ResU, &InVal);
4863 FPU_UNARY_R80_TEST_T const TestU
4864 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResU.FSW, InVal, ResU.r80Result };
4865 GenerateBinaryWrite(&BinOut, &TestU, sizeof(TestU));
4866
4867 uint16_t fXcpt = (ResM.FSW | ResU.FSW) & X86_FSW_XCPT_MASK & ~X86_FSW_SF;
4868 if (fXcpt)
4869 {
4870 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | fXcpt;
4871 IEMFPURESULT Res1 = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4872 pfn(&State, &Res1, &InVal);
4873 FPU_UNARY_R80_TEST_T const Test1
4874 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res1.FSW, InVal, Res1.r80Result };
4875 GenerateBinaryWrite(&BinOut, &Test1, sizeof(Test1));
4876 if (((Res1.FSW & X86_FSW_XCPT_MASK) & fXcpt) != (Res1.FSW & X86_FSW_XCPT_MASK))
4877 {
4878 fXcpt |= Res1.FSW & X86_FSW_XCPT_MASK;
4879 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | fXcpt;
4880 IEMFPURESULT Res2 = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4881 pfn(&State, &Res2, &InVal);
4882 FPU_UNARY_R80_TEST_T const Test2
4883 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res2.FSW, InVal, Res2.r80Result };
4884 GenerateBinaryWrite(&BinOut, &Test2, sizeof(Test2));
4885 }
4886 if (!RT_IS_POWER_OF_TWO(fXcpt))
4887 for (uint16_t fUnmasked = 1; fUnmasked <= X86_FCW_PM; fUnmasked <<= 1)
4888 if (fUnmasked & fXcpt)
4889 {
4890 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | (fXcpt & ~fUnmasked);
4891 IEMFPURESULT Res3 = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4892 pfn(&State, &Res3, &InVal);
4893 FPU_UNARY_R80_TEST_T const Test3
4894 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res3.FSW, InVal, Res3.r80Result };
4895 GenerateBinaryWrite(&BinOut, &Test3, sizeof(Test3));
4896 }
4897 }
4898 }
4899 }
4900 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
4901 }
4902 return RTEXITCODE_SUCCESS;
4903}
4904#endif
4905
4906static bool FpuIsEqualFcwMaybeIgnoreRoundErr(uint16_t fFcw1, uint16_t fFcw2, bool fRndErrOk, bool *pfRndErr)
4907{
4908 if (fFcw1 == fFcw2)
4909 return true;
4910 if (fRndErrOk && (fFcw1 & ~X86_FSW_C1) == (fFcw2 & ~X86_FSW_C1))
4911 {
4912 *pfRndErr = true;
4913 return true;
4914 }
4915 return false;
4916}
4917
4918static bool FpuIsEqualR80MaybeIgnoreRoundErr(PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2, bool fRndErrOk, bool *pfRndErr)
4919{
4920 if (RTFLOAT80U_ARE_IDENTICAL(pr80Val1, pr80Val2))
4921 return true;
4922 if ( fRndErrOk
4923 && pr80Val1->s.fSign == pr80Val2->s.fSign)
4924 {
4925 if ( ( pr80Val1->s.uExponent == pr80Val2->s.uExponent
4926 && ( pr80Val1->s.uMantissa > pr80Val2->s.uMantissa
4927 ? pr80Val1->s.uMantissa - pr80Val2->s.uMantissa == 1
4928 : pr80Val2->s.uMantissa - pr80Val1->s.uMantissa == 1))
4929 ||
4930 ( pr80Val1->s.uExponent + 1 == pr80Val2->s.uExponent
4931 && pr80Val1->s.uMantissa == UINT64_MAX
4932 && pr80Val2->s.uMantissa == RT_BIT_64(63))
4933 ||
4934 ( pr80Val1->s.uExponent == pr80Val2->s.uExponent + 1
4935 && pr80Val2->s.uMantissa == UINT64_MAX
4936 && pr80Val1->s.uMantissa == RT_BIT_64(63)) )
4937 {
4938 *pfRndErr = true;
4939 return true;
4940 }
4941 }
4942 return false;
4943}
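/* The value comparison above tolerates a single-ulp difference, including across a
   power-of-two boundary where the mantissa wraps: a mantissa of 0xffffffffffffffff at
   exponent e and a mantissa of 0x8000000000000000 at exponent e+1 are exactly one ulp
   apart and are accepted as a rounding-error match when fRndErrOk is set. */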
4944
4945
4946static void FpuUnaryR80Test(void)
4947{
4948 X86FXSTATE State;
4949 RT_ZERO(State);
4950 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuUnaryR80); iFn++)
4951 {
4952 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuUnaryR80[iFn]))
4953 continue;
4954
4955 FPU_UNARY_R80_TEST_T const * const paTests = g_aFpuUnaryR80[iFn].paTests;
4956 uint32_t const cTests = g_aFpuUnaryR80[iFn].cTests;
4957 PFNIEMAIMPLFPUR80UNARY pfn = g_aFpuUnaryR80[iFn].pfn;
4958 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuUnaryR80[iFn]);
4959 uint32_t cRndErrs = 0;
4960 uint32_t cPossibleRndErrs = 0;
4961 if (!cTests) RTTestSkipped(g_hTest, "no tests");
4962 for (uint32_t iVar = 0; iVar < cVars; iVar++)
4963 {
4964 for (uint32_t iTest = 0; iTest < cTests; iTest++)
4965 {
4966 RTFLOAT80U const InVal = paTests[iTest].InVal;
4967 IEMFPURESULT Res = { RTFLOAT80U_INIT(0, 0, 0), 0 };
4968 bool const fRndErrOk = RT_BOOL(paTests[iTest].fFcw & 0x80);
4969 State.FCW = paTests[iTest].fFcw & ~(uint16_t)0x80;
4970 State.FSW = paTests[iTest].fFswIn;
4971 pfn(&State, &Res, &InVal);
4972 bool fRndErr = false;
4973 if ( !FpuIsEqualFcwMaybeIgnoreRoundErr(Res.FSW, paTests[iTest].fFswOut, fRndErrOk, &fRndErr)
4974 || !FpuIsEqualR80MaybeIgnoreRoundErr(&Res.r80Result, &paTests[iTest].OutVal, fRndErrOk, &fRndErr))
4975 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n"
4976 "%s -> fsw=%#06x %s\n"
4977 "%s expected %#06x %s%s%s%s (%s)\n",
4978 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
4979 FormatR80(&paTests[iTest].InVal),
4980 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result),
4981 iVar ? " " : "", paTests[iTest].fFswOut, FormatR80(&paTests[iTest].OutVal),
4982 FswDiff(Res.FSW, paTests[iTest].fFswOut),
4983 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result, &paTests[iTest].OutVal) ? " - val" : "",
4984 fRndErrOk ? " - rounding errors ok" : "", FormatFcw(paTests[iTest].fFcw));
4985 cRndErrs += fRndErr;
4986 cPossibleRndErrs += fRndErrOk;
4987 }
4988 pfn = g_aFpuUnaryR80[iFn].pfnNative;
4989 }
4990 if (cPossibleRndErrs > 0)
4991 RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "rounding errors: %u out of %u\n", cRndErrs, cPossibleRndErrs);
4992 FREE_DECOMPRESSED_TESTS(g_aFpuUnaryR80[iFn]);
4993 }
4994}
4995
4996
4997/*
4998 * Unary FPU operations on one 80-bit floating point value, but only affects the FSW.
4999 */
5000TYPEDEF_SUBTEST_TYPE(FPU_UNARY_FSW_R80_T, FPU_UNARY_R80_TEST_T, PFNIEMAIMPLFPUR80UNARYFSW);
5001
5002static FPU_UNARY_FSW_R80_T g_aFpuUnaryFswR80[] =
5003{
5004 ENTRY_BIN(ftst_r80),
5005 ENTRY_BIN_EX(fxam_r80, 1),
5006};
5007
5008#ifdef TSTIEMAIMPL_WITH_GENERATOR
5009static RTEXITCODE FpuUnaryFswR80Generate(uint32_t cTests, const char * const *papszNameFmts)
5010{
5011 static RTFLOAT80U const s_aSpecials[] =
5012 {
5013 RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS), /* whatever */
5014 };
5015
5016 X86FXSTATE State;
5017 RT_ZERO(State);
5018 uint32_t cMinNormals = cTests / 4;
5019 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuUnaryFswR80); iFn++)
5020 {
5021 bool const fIsFxam = g_aFpuUnaryFswR80[iFn].uExtra == 1;
5022 PFNIEMAIMPLFPUR80UNARYFSW const pfn = g_aFpuUnaryFswR80[iFn].pfnNative ? g_aFpuUnaryFswR80[iFn].pfnNative : g_aFpuUnaryFswR80[iFn].pfn;
5023 if ( g_aFpuUnaryFswR80[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
5024 && g_aFpuUnaryFswR80[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
5025 continue;
5026 State.FTW = 0;
5027
5028 IEMBINARYOUTPUT BinOut;
5029 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuUnaryFswR80[iFn]), RTEXITCODE_FAILURE);
5030 uint32_t cNormalInputs = 0;
5031 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
5032 {
5033 RTFLOAT80U const InVal = iTest < cTests ? RandR80Src(iTest) : s_aSpecials[iTest - cTests];
5034 if (RTFLOAT80U_IS_NORMAL(&InVal))
5035 cNormalInputs++;
5036 else if (cNormalInputs < cMinNormals && iTest + cMinNormals >= cTests && iTest < cTests)
5037 {
5038 iTest -= 1;
5039 continue;
5040 }
5041
5042 uint16_t const fFcw = RandFcw();
5043 State.FSW = RandFsw();
5044 if (!fIsFxam)
5045 {
5046 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
5047 {
5048 for (uint16_t iPrecision = 0; iPrecision < 4; iPrecision++)
5049 {
5050 for (uint16_t iMask = 0; iMask <= X86_FCW_MASK_ALL; iMask += X86_FCW_MASK_ALL)
5051 {
5052 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_PC_MASK | X86_FCW_MASK_ALL))
5053 | (iRounding << X86_FCW_RC_SHIFT)
5054 | (iPrecision << X86_FCW_PC_SHIFT)
5055 | iMask;
5056 uint16_t fFswOut = 0;
5057 pfn(&State, &fFswOut, &InVal);
5058 FPU_UNARY_R80_TEST_T const Test = { State.FCW, State.FSW, fFswOut, InVal };
5059 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
5060 }
5061 }
5062 }
5063 }
5064 else
5065 {
5066 uint16_t fFswOut = 0;
5067 uint16_t const fEmpty = RTRandU32Ex(0, 3) == 3 ? 0x80 : 0; /* Using MBZ bit 7 in FCW to indicate empty tag value. */
5068 State.FTW = !fEmpty ? 1 << X86_FSW_TOP_GET(State.FSW) : 0;
5069 State.FCW = fFcw;
5070 pfn(&State, &fFswOut, &InVal);
5071 FPU_UNARY_R80_TEST_T const Test = { (uint16_t)(fFcw | fEmpty), State.FSW, fFswOut, InVal };
5072 GenerateBinaryWrite(&BinOut, &Test, sizeof(Test));
5073 }
5074 }
5075 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
5076 }
5077 return RTEXITCODE_SUCCESS;
5078}
5079DUMP_ALL_FN(FpuUnaryFswR80, g_aFpuUnaryFswR80)
5080#endif
5081
5082
5083static void FpuUnaryFswR80Test(void)
5084{
5085 X86FXSTATE State;
5086 RT_ZERO(State);
5087 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuUnaryFswR80); iFn++)
5088 {
5089 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuUnaryFswR80[iFn]))
5090 continue;
5091
5092 FPU_UNARY_R80_TEST_T const * const paTests = g_aFpuUnaryFswR80[iFn].paTests;
5093 uint32_t const cTests = g_aFpuUnaryFswR80[iFn].cTests;
5094 PFNIEMAIMPLFPUR80UNARYFSW pfn = g_aFpuUnaryFswR80[iFn].pfn;
5095 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuUnaryFswR80[iFn]);
5096 if (!cTests) RTTestSkipped(g_hTest, "no tests");
5097 for (uint32_t iVar = 0; iVar < cVars; iVar++)
5098 {
5099 for (uint32_t iTest = 0; iTest < cTests; iTest++)
5100 {
5101 RTFLOAT80U const InVal = paTests[iTest].InVal;
5102 uint16_t fFswOut = 0;
5103 State.FSW = paTests[iTest].fFswIn;
5104 State.FCW = paTests[iTest].fFcw & ~(uint16_t)0x80; /* see generator code */
5105 State.FTW = paTests[iTest].fFcw & 0x80 ? 0 : 1 << X86_FSW_TOP_GET(paTests[iTest].fFswIn);
5106 pfn(&State, &fFswOut, &InVal);
5107 if (fFswOut != paTests[iTest].fFswOut)
5108 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n"
5109 "%s -> fsw=%#06x\n"
5110 "%s expected %#06x %s (%s%s)\n",
5111 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
5112 FormatR80(&paTests[iTest].InVal),
5113 iVar ? " " : "", fFswOut,
5114 iVar ? " " : "", paTests[iTest].fFswOut,
5115 FswDiff(fFswOut, paTests[iTest].fFswOut), FormatFcw(paTests[iTest].fFcw),
5116 paTests[iTest].fFcw & 0x80 ? " empty" : "");
5117 }
5118 pfn = g_aFpuUnaryFswR80[iFn].pfnNative;
5119 }
5120
5121 FREE_DECOMPRESSED_TESTS(g_aFpuUnaryFswR80[iFn]);
5122 }
5123}
5124
5125/*
5126 * Unary FPU operations on one 80-bit floating point value, but with two outputs.
5127 */
5128TYPEDEF_SUBTEST_TYPE(FPU_UNARY_TWO_R80_T, FPU_UNARY_TWO_R80_TEST_T, PFNIEMAIMPLFPUR80UNARYTWO);
5129
5130static FPU_UNARY_TWO_R80_T g_aFpuUnaryTwoR80[] =
5131{
5132 ENTRY_BIN(fxtract_r80_r80),
5133 ENTRY_BIN_AMD( fptan_r80_r80, 0), // rounding differences
5134 ENTRY_BIN_INTEL(fptan_r80_r80, 0),
5135 ENTRY_BIN_AMD( fsincos_r80_r80, 0), // C1 differences & value differences (e.g. -1m0x235cf2f580244a27^-1696)
5136 ENTRY_BIN_INTEL(fsincos_r80_r80, 0),
5137};
5138
5139#ifdef TSTIEMAIMPL_WITH_GENERATOR
5140static RTEXITCODE FpuUnaryTwoR80Generate(uint32_t cTests, const char * const *papszNameFmts)
5141{
5142 static RTFLOAT80U const s_aSpecials[] =
5143 {
5144 RTFLOAT80U_INIT_C(0, 0xffffeeeeddddcccc, RTFLOAT80U_EXP_BIAS), /* whatever */
5145 };
5146
5147 X86FXSTATE State;
5148 RT_ZERO(State);
5149 uint32_t cMinNormals = cTests / 4;
5150 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuUnaryTwoR80); iFn++)
5151 {
5152 PFNIEMAIMPLFPUR80UNARYTWO const pfn = g_aFpuUnaryTwoR80[iFn].pfnNative ? g_aFpuUnaryTwoR80[iFn].pfnNative : g_aFpuUnaryTwoR80[iFn].pfn;
5153 if ( g_aFpuUnaryTwoR80[iFn].idxCpuEflFlavour != IEMTARGETCPU_EFL_BEHAVIOR_NATIVE
5154 && g_aFpuUnaryTwoR80[iFn].idxCpuEflFlavour != g_idxCpuEflFlavour)
5155 continue;
5156
5157 IEMBINARYOUTPUT BinOut;
5158 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aFpuUnaryTwoR80[iFn]), RTEXITCODE_FAILURE);
5159 uint32_t cNormalInputs = 0;
5160 uint32_t cTargetRangeInputs = 0;
5161 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
5162 {
5163 RTFLOAT80U InVal = iTest < cTests ? RandR80Src(iTest) : s_aSpecials[iTest - cTests];
5164 if (RTFLOAT80U_IS_NORMAL(&InVal))
5165 {
5166 if (iFn != 0)
5167 {
5168 unsigned uTargetExp = RTFLOAT80U_EXP_BIAS + 63 + 1 /* 2^64..2^-64 */;
5169 unsigned cTargetExp = 63*2 + 2; /* the full 2^64..2^-64 target range */
5170 if (InVal.s.uExponent <= uTargetExp && InVal.s.uExponent >= uTargetExp - cTargetExp)
5171 cTargetRangeInputs++;
5172 else if (cTargetRangeInputs < cMinNormals / 2 && iTest + cMinNormals / 2 >= cTests && iTest < cTests)
5173 {
5174 InVal.s.uExponent = RTRandU32Ex(uTargetExp - cTargetExp, uTargetExp);
5175 cTargetRangeInputs++;
5176 }
5177 }
5178 cNormalInputs++;
5179 }
5180 else if (cNormalInputs < cMinNormals && iTest + cMinNormals >= cTests && iTest < cTests)
5181 {
5182 iTest -= 1;
5183 continue;
5184 }
5185
5186 uint16_t const fFcwExtra = 0; /* for rounding error indication */
5187 uint16_t const fFcw = RandFcw();
5188 State.FSW = RandFsw();
5189
5190 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
5191 for (uint16_t iPrecision = 0; iPrecision < 4; iPrecision++)
5192 {
5193 State.FCW = (fFcw & ~(X86_FCW_RC_MASK | X86_FCW_PC_MASK | X86_FCW_MASK_ALL))
5194 | (iRounding << X86_FCW_RC_SHIFT)
5195 | (iPrecision << X86_FCW_PC_SHIFT)
5196 | X86_FCW_MASK_ALL;
5197 IEMFPURESULTTWO ResM = { RTFLOAT80U_INIT(0, 0, 0), 0, RTFLOAT80U_INIT(0, 0, 0) };
5198 pfn(&State, &ResM, &InVal);
5199 FPU_UNARY_TWO_R80_TEST_T const TestM
5200 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResM.FSW, InVal, ResM.r80Result1, ResM.r80Result2 };
5201 GenerateBinaryWrite(&BinOut, &TestM, sizeof(TestM));
5202
5203 State.FCW = State.FCW & ~X86_FCW_MASK_ALL;
5204 IEMFPURESULTTWO ResU = { RTFLOAT80U_INIT(0, 0, 0), 0, RTFLOAT80U_INIT(0, 0, 0) };
5205 pfn(&State, &ResU, &InVal);
5206 FPU_UNARY_TWO_R80_TEST_T const TestU
5207 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, ResU.FSW, InVal, ResU.r80Result1, ResU.r80Result2 };
5208 GenerateBinaryWrite(&BinOut, &TestU, sizeof(TestU));
5209
5210 uint16_t fXcpt = (ResM.FSW | ResU.FSW) & X86_FSW_XCPT_MASK & ~X86_FSW_SF;
5211 if (fXcpt)
5212 {
5213 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | fXcpt;
5214 IEMFPURESULTTWO Res1 = { RTFLOAT80U_INIT(0, 0, 0), 0, RTFLOAT80U_INIT(0, 0, 0) };
5215 pfn(&State, &Res1, &InVal);
5216 FPU_UNARY_TWO_R80_TEST_T const Test1
5217 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res1.FSW, InVal, Res1.r80Result1, Res1.r80Result2 };
5218 GenerateBinaryWrite(&BinOut, &Test1, sizeof(Test1));
5219
5220 if (((Res1.FSW & X86_FSW_XCPT_MASK) & fXcpt) != (Res1.FSW & X86_FSW_XCPT_MASK))
5221 {
5222 fXcpt |= Res1.FSW & X86_FSW_XCPT_MASK;
5223 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | fXcpt;
5224 IEMFPURESULTTWO Res2 = { RTFLOAT80U_INIT(0, 0, 0), 0, RTFLOAT80U_INIT(0, 0, 0) };
5225 pfn(&State, &Res2, &InVal);
5226 FPU_UNARY_TWO_R80_TEST_T const Test2
5227 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res2.FSW, InVal, Res2.r80Result1, Res2.r80Result2 };
5228 GenerateBinaryWrite(&BinOut, &Test2, sizeof(Test2));
5229 }
5230 if (!RT_IS_POWER_OF_TWO(fXcpt))
5231 for (uint16_t fUnmasked = 1; fUnmasked <= X86_FCW_PM; fUnmasked <<= 1)
5232 if (fUnmasked & fXcpt)
5233 {
5234 State.FCW = (State.FCW & ~X86_FCW_MASK_ALL) | (fXcpt & ~fUnmasked);
5235 IEMFPURESULTTWO Res3 = { RTFLOAT80U_INIT(0, 0, 0), 0, RTFLOAT80U_INIT(0, 0, 0) };
5236 pfn(&State, &Res3, &InVal);
5237 FPU_UNARY_TWO_R80_TEST_T const Test3
5238 = { (uint16_t)(State.FCW | fFcwExtra), State.FSW, Res3.FSW, InVal, Res3.r80Result1, Res3.r80Result2 };
5239 GenerateBinaryWrite(&BinOut, &Test3, sizeof(Test3));
5240 }
5241 }
5242 }
5243 }
5244 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
5245 }
5246 return RTEXITCODE_SUCCESS;
5247}
5248DUMP_ALL_FN(FpuUnaryTwoR80, g_aFpuUnaryTwoR80)
5249#endif
5250
5251
5252static void FpuUnaryTwoR80Test(void)
5253{
5254 X86FXSTATE State;
5255 RT_ZERO(State);
5256 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aFpuUnaryTwoR80); iFn++)
5257 {
5258 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aFpuUnaryTwoR80[iFn]))
5259 continue;
5260
5261 FPU_UNARY_TWO_R80_TEST_T const * const paTests = g_aFpuUnaryTwoR80[iFn].paTests;
5262 uint32_t const cTests = g_aFpuUnaryTwoR80[iFn].cTests;
5263 PFNIEMAIMPLFPUR80UNARYTWO pfn = g_aFpuUnaryTwoR80[iFn].pfn;
5264 uint32_t const cVars = COUNT_VARIATIONS(g_aFpuUnaryTwoR80[iFn]);
5265 if (!cTests) RTTestSkipped(g_hTest, "no tests");
5266 for (uint32_t iVar = 0; iVar < cVars; iVar++)
5267 {
5268 for (uint32_t iTest = 0; iTest < cTests; iTest++)
5269 {
5270 IEMFPURESULTTWO Res = { RTFLOAT80U_INIT(0, 0, 0), 0, RTFLOAT80U_INIT(0, 0, 0) };
5271 RTFLOAT80U const InVal = paTests[iTest].InVal;
5272 State.FCW = paTests[iTest].fFcw;
5273 State.FSW = paTests[iTest].fFswIn;
5274 pfn(&State, &Res, &InVal);
5275 if ( Res.FSW != paTests[iTest].fFswOut
5276 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result1, &paTests[iTest].OutVal1)
5277 || !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result2, &paTests[iTest].OutVal2) )
5278 RTTestFailed(g_hTest, "#%04u%s: fcw=%#06x fsw=%#06x in=%s\n"
5279 "%s -> fsw=%#06x %s %s\n"
5280 "%s expected %#06x %s %s %s%s%s (%s)\n",
5281 iTest, iVar ? "/n" : "", paTests[iTest].fFcw, paTests[iTest].fFswIn,
5282 FormatR80(&paTests[iTest].InVal),
5283 iVar ? " " : "", Res.FSW, FormatR80(&Res.r80Result1), FormatR80(&Res.r80Result2),
5284 iVar ? " " : "", paTests[iTest].fFswOut,
5285 FormatR80(&paTests[iTest].OutVal1), FormatR80(&paTests[iTest].OutVal2),
5286 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result1, &paTests[iTest].OutVal1) ? " - val1" : "",
5287 !RTFLOAT80U_ARE_IDENTICAL(&Res.r80Result2, &paTests[iTest].OutVal2) ? " - val2" : "",
5288 FswDiff(Res.FSW, paTests[iTest].fFswOut), FormatFcw(paTests[iTest].fFcw) );
5289 }
5290 pfn = g_aFpuUnaryTwoR80[iFn].pfnNative;
5291 }
5292
5293 FREE_DECOMPRESSED_TESTS(g_aFpuUnaryTwoR80[iFn]);
5294 }
5295}
5296
5297
5298/*********************************************************************************************************************************
5299* SSE floating point Binary Operations *
5300*********************************************************************************************************************************/
5301
5302/*
5303 * Binary SSE operations on packed single precision floating point values.
5304 */
5305TYPEDEF_SUBTEST_TYPE(SSE_BINARY_R32_T, SSE_BINARY_TEST_T, PFNIEMAIMPLFPSSEF2U128);
5306
5307static SSE_BINARY_R32_T g_aSseBinaryR32[] =
5308{
5309 ENTRY_BIN(addps_u128),
5310 ENTRY_BIN(mulps_u128),
5311 ENTRY_BIN(subps_u128),
5312 ENTRY_BIN(minps_u128),
5313 ENTRY_BIN(divps_u128),
5314 ENTRY_BIN(maxps_u128),
5315 ENTRY_BIN(haddps_u128),
5316 ENTRY_BIN(hsubps_u128),
5317 ENTRY_BIN(sqrtps_u128),
5318 ENTRY_BIN(addsubps_u128),
5319 ENTRY_BIN(cvtps2pd_u128),
5320};
5321
5322#ifdef TSTIEMAIMPL_WITH_GENERATOR
5323DUMP_ALL_FN(SseBinaryR32, g_aSseBinaryR32)
5324static RTEXITCODE SseBinaryR32Generate(uint32_t cTests, const char * const *papszNameFmts)
5325{
5326 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
5327
5328 static struct { RTFLOAT32U aVal1[4], aVal2[4]; } const s_aSpecials[] =
5329 {
5330 { { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), },
5331 { RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1), RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1), RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1), RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1) } },
5332 /** @todo More specials. */
5333 };
5334
5335 X86FXSTATE State;
5336 RT_ZERO(State);
5337 uint32_t cMinNormalPairs = (cTests - 144) / 4;
5338 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR32); iFn++)
5339 {
5340 PFNIEMAIMPLFPSSEF2U128 const pfn = g_aSseBinaryR32[iFn].pfnNative ? g_aSseBinaryR32[iFn].pfnNative : g_aSseBinaryR32[iFn].pfn;
5341
5342 IEMBINARYOUTPUT BinOut;
5343 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryR32[iFn]), RTEXITCODE_FAILURE);
5344
5345 uint32_t cNormalInputPairs = 0;
5346 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
5347 {
5348 SSE_BINARY_TEST_T TestData; RT_ZERO(TestData);
5349
5350 TestData.InVal1.ar32[0] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
5351 TestData.InVal1.ar32[1] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
5352 TestData.InVal1.ar32[2] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[2];
5353 TestData.InVal1.ar32[3] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[3];
5354
5355 TestData.InVal2.ar32[0] = iTest < cTests ? RandR32Src2(iTest) : s_aSpecials[iTest - cTests].aVal2[0];
5356 TestData.InVal2.ar32[1] = iTest < cTests ? RandR32Src2(iTest) : s_aSpecials[iTest - cTests].aVal2[1];
5357 TestData.InVal2.ar32[2] = iTest < cTests ? RandR32Src2(iTest) : s_aSpecials[iTest - cTests].aVal2[2];
5358 TestData.InVal2.ar32[3] = iTest < cTests ? RandR32Src2(iTest) : s_aSpecials[iTest - cTests].aVal2[3];
5359
5360 if ( RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[0]) && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[0])
5361 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[1]) && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[1])
5362 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[2]) && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[2])
5363 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[3]) && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[3]))
5364 cNormalInputPairs++;
5365 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
5366 {
5367 iTest -= 1;
5368 continue;
5369 }
5370
5371 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
5372 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
5373 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
5374 for (uint8_t iFz = 0; iFz < 2; iFz++)
5375 {
5376 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
5377 | (iRounding << X86_MXCSR_RC_SHIFT)
5378 | (iDaz ? X86_MXCSR_DAZ : 0)
5379 | (iFz ? X86_MXCSR_FZ : 0)
5380 | X86_MXCSR_XCPT_MASK;
5381 IEMSSERESULT ResM; RT_ZERO(ResM);
5382 pfn(&State, &ResM, &TestData.InVal1, &TestData.InVal2);
5383 TestData.fMxcsrIn = State.MXCSR;
5384 TestData.fMxcsrOut = ResM.MXCSR;
5385 TestData.OutVal = ResM.uResult;
5386 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5387
5388 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
5389 IEMSSERESULT ResU; RT_ZERO(ResU);
5390 pfn(&State, &ResU, &TestData.InVal1, &TestData.InVal2);
5391 TestData.fMxcsrIn = State.MXCSR;
5392 TestData.fMxcsrOut = ResU.MXCSR;
5393 TestData.OutVal = ResU.uResult;
5394 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5395
5396 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
5397 if (fXcpt)
5398 {
5399 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
5400 IEMSSERESULT Res1; RT_ZERO(Res1);
5401 pfn(&State, &Res1, &TestData.InVal1, &TestData.InVal2);
5402 TestData.fMxcsrIn = State.MXCSR;
5403 TestData.fMxcsrOut = Res1.MXCSR;
5404 TestData.OutVal = Res1.uResult;
5405 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5406
5407 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
5408 {
5409 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
5410 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
5411 IEMSSERESULT Res2; RT_ZERO(Res2);
5412 pfn(&State, &Res2, &TestData.InVal1, &TestData.InVal2);
5413 TestData.fMxcsrIn = State.MXCSR;
5414 TestData.fMxcsrOut = Res2.MXCSR;
5415 TestData.OutVal = Res2.uResult;
5416 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5417 }
5418 if (!RT_IS_POWER_OF_TWO(fXcpt))
5419 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
5420 if (fUnmasked & fXcpt)
5421 {
5422 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
5423 IEMSSERESULT Res3; RT_ZERO(Res3);
5424 pfn(&State, &Res3, &TestData.InVal1, &TestData.InVal2);
5425 TestData.fMxcsrIn = State.MXCSR;
5426 TestData.fMxcsrOut = Res3.MXCSR;
5427 TestData.OutVal = Res3.uResult;
5428 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5429 }
5430 }
5431 }
5432 }
5433 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
5434 }
5435
5436 return RTEXITCODE_SUCCESS;
5437}
5438#endif
5439
5440static void SseBinaryR32Test(void)
5441{
5442 X86FXSTATE State;
5443 RT_ZERO(State);
5444 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR32); iFn++)
5445 {
5446 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryR32[iFn]))
5447 continue;
5448
5449 SSE_BINARY_TEST_T const * const paTests = g_aSseBinaryR32[iFn].paTests;
5450 uint32_t const cTests = g_aSseBinaryR32[iFn].cTests;
5451 PFNIEMAIMPLFPSSEF2U128 pfn = g_aSseBinaryR32[iFn].pfn;
5452 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryR32[iFn]);
5453 if (!cTests) RTTestSkipped(g_hTest, "no tests");
5454 for (uint32_t iVar = 0; iVar < cVars; iVar++)
5455 {
5456 for (uint32_t iTest = 0; iTest < cTests; iTest++)
5457 {
5458 IEMSSERESULT Res; RT_ZERO(Res);
5459
5460 State.MXCSR = paTests[iTest].fMxcsrIn;
5461 pfn(&State, &Res, &paTests[iTest].InVal1, &paTests[iTest].InVal2);
5462 bool fValsIdentical = RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[0], &paTests[iTest].OutVal.ar32[0])
5463 && RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[1], &paTests[iTest].OutVal.ar32[1])
5464 && RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[2], &paTests[iTest].OutVal.ar32[2])
5465 && RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[3], &paTests[iTest].OutVal.ar32[3]);
5466 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
5467 || !fValsIdentical)
5468 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s'%s'%s in2=%s'%s'%s'%s\n"
5469 "%s -> mxcsr=%#08x %s'%s'%s'%s\n"
5470 "%s expected %#08x %s'%s'%s'%s%s%s (%s)\n",
5471 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
5472 FormatR32(&paTests[iTest].InVal1.ar32[0]), FormatR32(&paTests[iTest].InVal1.ar32[1]),
5473 FormatR32(&paTests[iTest].InVal1.ar32[2]), FormatR32(&paTests[iTest].InVal1.ar32[3]),
5474 FormatR32(&paTests[iTest].InVal2.ar32[0]), FormatR32(&paTests[iTest].InVal2.ar32[1]),
5475 FormatR32(&paTests[iTest].InVal2.ar32[2]), FormatR32(&paTests[iTest].InVal2.ar32[3]),
5476 iVar ? " " : "", Res.MXCSR,
5477 FormatR32(&Res.uResult.ar32[0]), FormatR32(&Res.uResult.ar32[1]),
5478 FormatR32(&Res.uResult.ar32[2]), FormatR32(&Res.uResult.ar32[3]),
5479 iVar ? " " : "", paTests[iTest].fMxcsrOut,
5480 FormatR32(&paTests[iTest].OutVal.ar32[0]), FormatR32(&paTests[iTest].OutVal.ar32[1]),
5481 FormatR32(&paTests[iTest].OutVal.ar32[2]), FormatR32(&paTests[iTest].OutVal.ar32[3]),
5482 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
5483 !fValsIdentical ? " - val" : "",
5484 FormatMxcsr(paTests[iTest].fMxcsrIn) );
5485 }
5486 pfn = g_aSseBinaryR32[iFn].pfnNative;
5487 }
5488
5489 FREE_DECOMPRESSED_TESTS(g_aSseBinaryR32[iFn]);
5490 }
5491}
5492
5493
5494/*
5495 * Binary SSE operations on packed double precision floating point values.
5496 */
5497TYPEDEF_SUBTEST_TYPE(SSE_BINARY_R64_T, SSE_BINARY_TEST_T, PFNIEMAIMPLFPSSEF2U128);
5498
5499static SSE_BINARY_R64_T g_aSseBinaryR64[] =
5500{
5501 ENTRY_BIN(addpd_u128),
5502 ENTRY_BIN(mulpd_u128),
5503 ENTRY_BIN(subpd_u128),
5504 ENTRY_BIN(minpd_u128),
5505 ENTRY_BIN(divpd_u128),
5506 ENTRY_BIN(maxpd_u128),
5507 ENTRY_BIN(haddpd_u128),
5508 ENTRY_BIN(hsubpd_u128),
5509 ENTRY_BIN(sqrtpd_u128),
5510 ENTRY_BIN(addsubpd_u128),
5511 ENTRY_BIN(cvtpd2ps_u128),
5512};
5513
5514#ifdef TSTIEMAIMPL_WITH_GENERATOR
5515 DUMP_ALL_FN(SseBinaryR64, g_aSseBinaryR64)
5516static RTEXITCODE SseBinaryR64Generate(uint32_t cTests, const char * const *papszNameFmts)
5517{
5518 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
5519
5520 static struct { RTFLOAT64U aVal1[2], aVal2[2]; } const s_aSpecials[] =
5521 {
5522 { { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(0) },
5523 { RTFLOAT64U_INIT_C(0, 8388607, RTFLOAT64U_EXP_MAX - 1), RTFLOAT64U_INIT_C(0, 8388607, RTFLOAT64U_EXP_MAX - 1) } },
5524 /** @todo More specials. */
5525 };
5526
5527 X86FXSTATE State;
5528 RT_ZERO(State);
5529 uint32_t cMinNormalPairs = (cTests - 144) / 4;
5530 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR64); iFn++)
5531 {
5532 PFNIEMAIMPLFPSSEF2U128 const pfn = g_aSseBinaryR64[iFn].pfnNative ? g_aSseBinaryR64[iFn].pfnNative : g_aSseBinaryR64[iFn].pfn;
5533
5534 IEMBINARYOUTPUT BinOut;
5535 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryR64[iFn]), RTEXITCODE_FAILURE);
5536
5537 uint32_t cNormalInputPairs = 0;
5538 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
5539 {
5540 SSE_BINARY_TEST_T TestData; RT_ZERO(TestData);
5541
5542 TestData.InVal1.ar64[0] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
5543 TestData.InVal1.ar64[1] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
5544 TestData.InVal2.ar64[0] = iTest < cTests ? RandR64Src2(iTest) : s_aSpecials[iTest - cTests].aVal2[0];
5545 TestData.InVal2.ar64[1] = iTest < cTests ? RandR64Src2(iTest) : s_aSpecials[iTest - cTests].aVal2[1];
5546
5547 if ( RTFLOAT64U_IS_NORMAL(&TestData.InVal1.ar64[0]) && RTFLOAT64U_IS_NORMAL(&TestData.InVal1.ar64[1])
5548 && RTFLOAT64U_IS_NORMAL(&TestData.InVal2.ar64[0]) && RTFLOAT64U_IS_NORMAL(&TestData.InVal2.ar64[1]))
5549 cNormalInputPairs++;
5550 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
5551 {
5552 iTest -= 1;
5553 continue;
5554 }
5555
5556 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
5557 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
5558 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
5559 for (uint8_t iFz = 0; iFz < 2; iFz++)
5560 {
5561 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
5562 | (iRounding << X86_MXCSR_RC_SHIFT)
5563 | (iDaz ? X86_MXCSR_DAZ : 0)
5564 | (iFz ? X86_MXCSR_FZ : 0)
5565 | X86_MXCSR_XCPT_MASK;
5566 IEMSSERESULT ResM; RT_ZERO(ResM);
5567 pfn(&State, &ResM, &TestData.InVal1, &TestData.InVal2);
5568 TestData.fMxcsrIn = State.MXCSR;
5569 TestData.fMxcsrOut = ResM.MXCSR;
5570 TestData.OutVal = ResM.uResult;
5571 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5572
5573 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
5574 IEMSSERESULT ResU; RT_ZERO(ResU);
5575 pfn(&State, &ResU, &TestData.InVal1, &TestData.InVal2);
5576 TestData.fMxcsrIn = State.MXCSR;
5577 TestData.fMxcsrOut = ResU.MXCSR;
5578 TestData.OutVal = ResU.uResult;
5579 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5580
5581 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
5582 if (fXcpt)
5583 {
5584 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
5585 IEMSSERESULT Res1; RT_ZERO(Res1);
5586 pfn(&State, &Res1, &TestData.InVal1, &TestData.InVal2);
5587 TestData.fMxcsrIn = State.MXCSR;
5588 TestData.fMxcsrOut = Res1.MXCSR;
5589 TestData.OutVal = Res1.uResult;
5590 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5591
5592 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
5593 {
5594 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
5595 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
5596 IEMSSERESULT Res2; RT_ZERO(Res2);
5597 pfn(&State, &Res2, &TestData.InVal1, &TestData.InVal2);
5598 TestData.fMxcsrIn = State.MXCSR;
5599 TestData.fMxcsrOut = Res2.MXCSR;
5600 TestData.OutVal = Res2.uResult;
5601 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5602 }
5603 if (!RT_IS_POWER_OF_TWO(fXcpt))
5604 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
5605 if (fUnmasked & fXcpt)
5606 {
5607 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
5608 IEMSSERESULT Res3; RT_ZERO(Res3);
5609 pfn(&State, &Res3, &TestData.InVal1, &TestData.InVal2);
5610 TestData.fMxcsrIn = State.MXCSR;
5611 TestData.fMxcsrOut = Res3.MXCSR;
5612 TestData.OutVal = Res3.uResult;
5613 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5614 }
5615 }
5616 }
5617 }
5618 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
5619 }
5620
5621 return RTEXITCODE_SUCCESS;
5622}
5623#endif
5624
5625
5626static void SseBinaryR64Test(void)
5627{
5628 X86FXSTATE State;
5629 RT_ZERO(State);
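    /* Replay each recorded input with its recorded MXCSR and verify that the output
       MXCSR and the full 128-bit result match the reference values.  The remaining
       SSE test functions below follow the same replay-and-compare pattern. */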
5630 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR64); iFn++)
5631 {
5632 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryR64[iFn]))
5633 continue;
5634
5635 SSE_BINARY_TEST_T const * const paTests = g_aSseBinaryR64[iFn].paTests;
5636 uint32_t const cTests = g_aSseBinaryR64[iFn].cTests;
5637 PFNIEMAIMPLFPSSEF2U128 pfn = g_aSseBinaryR64[iFn].pfn;
5638 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryR64[iFn]);
5639 if (!cTests) RTTestSkipped(g_hTest, "no tests");
5640 for (uint32_t iVar = 0; iVar < cVars; iVar++)
5641 {
5642 for (uint32_t iTest = 0; iTest < cTests; iTest++)
5643 {
5644 IEMSSERESULT Res; RT_ZERO(Res);
5645
5646 State.MXCSR = paTests[iTest].fMxcsrIn;
5647 pfn(&State, &Res, &paTests[iTest].InVal1, &paTests[iTest].InVal2);
5648 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
5649 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[0], &paTests[iTest].OutVal.ar64[0])
5650 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[1], &paTests[iTest].OutVal.ar64[1]))
5651 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s in2=%s'%s\n"
5652 "%s -> mxcsr=%#08x %s'%s\n"
5653 "%s expected %#08x %s'%s%s%s (%s)\n",
5654 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
5655 FormatR64(&paTests[iTest].InVal1.ar64[0]), FormatR64(&paTests[iTest].InVal1.ar64[1]),
5656 FormatR64(&paTests[iTest].InVal2.ar64[0]), FormatR64(&paTests[iTest].InVal2.ar64[1]),
5657 iVar ? " " : "", Res.MXCSR,
5658 FormatR64(&Res.uResult.ar64[0]), FormatR64(&Res.uResult.ar64[1]),
5659 iVar ? " " : "", paTests[iTest].fMxcsrOut,
5660 FormatR64(&paTests[iTest].OutVal.ar64[0]), FormatR64(&paTests[iTest].OutVal.ar64[1]),
5661 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
5662 ( !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[0], &paTests[iTest].OutVal.ar64[0])
5663 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[1], &paTests[iTest].OutVal.ar64[1]))
5664 ? " - val" : "",
5665 FormatMxcsr(paTests[iTest].fMxcsrIn) );
5666 }
5667 pfn = g_aSseBinaryR64[iFn].pfnNative;
5668 }
5669
5670 FREE_DECOMPRESSED_TESTS(g_aSseBinaryR64[iFn]);
5671 }
5672}
5673
5674
5675/*
5676 * Binary SSE operations on single precision floating point values (xxxss xmm1, r/m32).
5677 */
5678TYPEDEF_SUBTEST_TYPE(SSE_BINARY_U128_R32_T, SSE_BINARY_U128_R32_TEST_T, PFNIEMAIMPLFPSSEF2U128R32);
5679
5680static SSE_BINARY_U128_R32_T g_aSseBinaryU128R32[] =
5681{
5682 ENTRY_BIN(addss_u128_r32),
5683 ENTRY_BIN(mulss_u128_r32),
5684 ENTRY_BIN(subss_u128_r32),
5685 ENTRY_BIN(minss_u128_r32),
5686 ENTRY_BIN(divss_u128_r32),
5687 ENTRY_BIN(maxss_u128_r32),
5688 ENTRY_BIN(cvtss2sd_u128_r32),
5689 ENTRY_BIN(sqrtss_u128_r32),
5690};
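/* Note: these workers implement scalar operations, so only the low element of the
   result is computed from r32Val2 while the remaining lanes are carried over from
   the first (128-bit) operand. */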
5691
5692#ifdef TSTIEMAIMPL_WITH_GENERATOR
5693DUMP_ALL_FN(SseBinaryU128R32, g_aSseBinaryU128R32)
5694static RTEXITCODE SseBinaryU128R32Generate(uint32_t cTests, const char * const *papszNameFmts)
5695{
5696 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
5697
5698 static struct { RTFLOAT32U aVal1[4], Val2; } const s_aSpecials[] =
5699 {
5700 { { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), }, RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1) },
5701 /** @todo More specials. */
5702 };
5703
5704 X86FXSTATE State;
5705 RT_ZERO(State);
5706 uint32_t cMinNormalPairs = (cTests - 144) / 4;
5707 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryU128R32); iFn++)
5708 {
5709 PFNIEMAIMPLFPSSEF2U128R32 const pfn = g_aSseBinaryU128R32[iFn].pfnNative ? g_aSseBinaryU128R32[iFn].pfnNative : g_aSseBinaryU128R32[iFn].pfn;
5710
5711 IEMBINARYOUTPUT BinOut;
5712 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryU128R32[iFn]), RTEXITCODE_FAILURE);
5713
5714 uint32_t cNormalInputPairs = 0;
5715 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
5716 {
5717 SSE_BINARY_U128_R32_TEST_T TestData; RT_ZERO(TestData);
5718
5719 TestData.InVal1.ar32[0] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
5720 TestData.InVal1.ar32[1] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
5721 TestData.InVal1.ar32[2] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[2];
5722 TestData.InVal1.ar32[3] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[3];
5723
5724 TestData.r32Val2 = iTest < cTests ? RandR32Src2(iTest) : s_aSpecials[iTest - cTests].Val2;
5725
5726 if ( RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[0])
5727 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[1])
5728 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[2])
5729 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[3])
5730 && RTFLOAT32U_IS_NORMAL(&TestData.r32Val2))
5731 cNormalInputPairs++;
5732 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
5733 {
5734 iTest -= 1;
5735 continue;
5736 }
5737
5738 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
5739 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
5740 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
5741 for (uint8_t iFz = 0; iFz < 2; iFz++)
5742 {
5743 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
5744 | (iRounding << X86_MXCSR_RC_SHIFT)
5745 | (iDaz ? X86_MXCSR_DAZ : 0)
5746 | (iFz ? X86_MXCSR_FZ : 0)
5747 | X86_MXCSR_XCPT_MASK;
5748 IEMSSERESULT ResM; RT_ZERO(ResM);
5749 pfn(&State, &ResM, &TestData.InVal1, &TestData.r32Val2);
5750 TestData.fMxcsrIn = State.MXCSR;
5751 TestData.fMxcsrOut = ResM.MXCSR;
5752 TestData.OutVal = ResM.uResult;
5753 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5754
5755 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
5756 IEMSSERESULT ResU; RT_ZERO(ResU);
5757 pfn(&State, &ResU, &TestData.InVal1, &TestData.r32Val2);
5758 TestData.fMxcsrIn = State.MXCSR;
5759 TestData.fMxcsrOut = ResU.MXCSR;
5760 TestData.OutVal = ResU.uResult;
5761 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5762
5763 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
5764 if (fXcpt)
5765 {
5766 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
5767 IEMSSERESULT Res1; RT_ZERO(Res1);
5768 pfn(&State, &Res1, &TestData.InVal1, &TestData.r32Val2);
5769 TestData.fMxcsrIn = State.MXCSR;
5770 TestData.fMxcsrOut = Res1.MXCSR;
5771 TestData.OutVal = Res1.uResult;
5772 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5773
5774 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
5775 {
5776 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
5777 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
5778 IEMSSERESULT Res2; RT_ZERO(Res2);
5779 pfn(&State, &Res2, &TestData.InVal1, &TestData.r32Val2);
5780 TestData.fMxcsrIn = State.MXCSR;
5781 TestData.fMxcsrOut = Res2.MXCSR;
5782 TestData.OutVal = Res2.uResult;
5783 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5784 }
5785 if (!RT_IS_POWER_OF_TWO(fXcpt))
5786 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
5787 if (fUnmasked & fXcpt)
5788 {
5789 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
5790 IEMSSERESULT Res3; RT_ZERO(Res3);
5791 pfn(&State, &Res3, &TestData.InVal1, &TestData.r32Val2);
5792 TestData.fMxcsrIn = State.MXCSR;
5793 TestData.fMxcsrOut = Res3.MXCSR;
5794 TestData.OutVal = Res3.uResult;
5795 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5796 }
5797 }
5798 }
5799 }
5800 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
5801 }
5802
5803 return RTEXITCODE_SUCCESS;
5804}
5805#endif
5806
5807static void SseBinaryU128R32Test(void)
5808{
5809 X86FXSTATE State;
5810 RT_ZERO(State);
5811 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryU128R32); iFn++)
5812 {
5813 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryU128R32[iFn]))
5814 continue;
5815
5816 SSE_BINARY_U128_R32_TEST_T const * const paTests = g_aSseBinaryU128R32[iFn].paTests;
5817 uint32_t const cTests = g_aSseBinaryU128R32[iFn].cTests;
5818 PFNIEMAIMPLFPSSEF2U128R32 pfn = g_aSseBinaryU128R32[iFn].pfn;
5819 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryU128R32[iFn]);
5820 if (!cTests) RTTestSkipped(g_hTest, "no tests");
5821 for (uint32_t iVar = 0; iVar < cVars; iVar++)
5822 {
5823 for (uint32_t iTest = 0; iTest < cTests; iTest++)
5824 {
5825 IEMSSERESULT Res; RT_ZERO(Res);
5826
5827 State.MXCSR = paTests[iTest].fMxcsrIn;
5828 pfn(&State, &Res, &paTests[iTest].InVal1, &paTests[iTest].r32Val2);
5829 bool fValsIdentical = RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[0], &paTests[iTest].OutVal.ar32[0])
5830 && RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[1], &paTests[iTest].OutVal.ar32[1])
5831 && RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[2], &paTests[iTest].OutVal.ar32[2])
5832 && RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[3], &paTests[iTest].OutVal.ar32[3]);
5833 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
5834 || !fValsIdentical)
5835 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s'%s'%s in2=%s\n"
5836 "%s -> mxcsr=%#08x %s'%s'%s'%s\n"
5837 "%s expected %#08x %s'%s'%s'%s%s%s (%s)\n",
5838 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
5839 FormatR32(&paTests[iTest].InVal1.ar32[0]), FormatR32(&paTests[iTest].InVal1.ar32[1]),
5840 FormatR32(&paTests[iTest].InVal1.ar32[2]), FormatR32(&paTests[iTest].InVal1.ar32[3]),
5841 FormatR32(&paTests[iTest].r32Val2),
5842 iVar ? " " : "", Res.MXCSR,
5843 FormatR32(&Res.uResult.ar32[0]), FormatR32(&Res.uResult.ar32[1]),
5844 FormatR32(&Res.uResult.ar32[2]), FormatR32(&Res.uResult.ar32[3]),
5845 iVar ? " " : "", paTests[iTest].fMxcsrOut,
5846 FormatR32(&paTests[iTest].OutVal.ar32[0]), FormatR32(&paTests[iTest].OutVal.ar32[1]),
5847 FormatR32(&paTests[iTest].OutVal.ar32[2]), FormatR32(&paTests[iTest].OutVal.ar32[3]),
5848 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
5849 !fValsIdentical ? " - val" : "",
5850 FormatMxcsr(paTests[iTest].fMxcsrIn) );
5851 }
5852 }
5853
5854 FREE_DECOMPRESSED_TESTS(g_aSseBinaryU128R32[iFn]);
5855 }
5856}
5857
5858
5859/*
5860 * Binary SSE operations on double precision floating point values (xxxsd xmm1, r/m64).
5861 */
5862TYPEDEF_SUBTEST_TYPE(SSE_BINARY_U128_R64_T, SSE_BINARY_U128_R64_TEST_T, PFNIEMAIMPLFPSSEF2U128R64);
5863
5864static SSE_BINARY_U128_R64_T g_aSseBinaryU128R64[] =
5865{
5866 ENTRY_BIN(addsd_u128_r64),
5867 ENTRY_BIN(mulsd_u128_r64),
5868 ENTRY_BIN(subsd_u128_r64),
5869 ENTRY_BIN(minsd_u128_r64),
5870 ENTRY_BIN(divsd_u128_r64),
5871 ENTRY_BIN(maxsd_u128_r64),
5872 ENTRY_BIN(cvtsd2ss_u128_r64),
5873 ENTRY_BIN(sqrtsd_u128_r64),
5874};
5875
5876#ifdef TSTIEMAIMPL_WITH_GENERATOR
5877DUMP_ALL_FN(SseBinaryU128R64, g_aSseBinaryU128R64)
5878static RTEXITCODE SseBinaryU128R64Generate(uint32_t cTests, const char * const *papszNameFmts)
5879{
5880 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
5881
5882 static struct { RTFLOAT64U aVal1[2], Val2; } const s_aSpecials[] =
5883 {
5884 { { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(0) }, RTFLOAT64U_INIT_C(0, 8388607, RTFLOAT64U_EXP_MAX - 1) },
5885 /** @todo More specials. */
5886 };
5887
5888 X86FXSTATE State;
5889 RT_ZERO(State);
5890 uint32_t cMinNormalPairs = (cTests - 144) / 4;
5891 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryU128R64); iFn++)
5892 {
5893 PFNIEMAIMPLFPSSEF2U128R64 const pfn = g_aSseBinaryU128R64[iFn].pfnNative ? g_aSseBinaryU128R64[iFn].pfnNative : g_aSseBinaryU128R64[iFn].pfn;
5894
5895 IEMBINARYOUTPUT BinOut;
5896 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryU128R64[iFn]), RTEXITCODE_FAILURE);
5897
5898 uint32_t cNormalInputPairs = 0;
5899 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
5900 {
5901 SSE_BINARY_U128_R64_TEST_T TestData; RT_ZERO(TestData);
5902
5903 TestData.InVal1.ar64[0] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
5904 TestData.InVal1.ar64[1] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
5905 TestData.r64Val2 = iTest < cTests ? RandR64Src2(iTest) : s_aSpecials[iTest - cTests].Val2;
5906
5907 if ( RTFLOAT64U_IS_NORMAL(&TestData.InVal1.ar64[0]) && RTFLOAT64U_IS_NORMAL(&TestData.InVal1.ar64[1])
5908 && RTFLOAT64U_IS_NORMAL(&TestData.r64Val2))
5909 cNormalInputPairs++;
5910 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
5911 {
5912 iTest -= 1;
5913 continue;
5914 }
5915
5916 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
5917 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
5918 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
5919 for (uint8_t iFz = 0; iFz < 2; iFz++)
5920 {
5921 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
5922 | (iRounding << X86_MXCSR_RC_SHIFT)
5923 | (iDaz ? X86_MXCSR_DAZ : 0)
5924 | (iFz ? X86_MXCSR_FZ : 0)
5925 | X86_MXCSR_XCPT_MASK;
5926 IEMSSERESULT ResM; RT_ZERO(ResM);
5927 pfn(&State, &ResM, &TestData.InVal1, &TestData.r64Val2);
5928 TestData.fMxcsrIn = State.MXCSR;
5929 TestData.fMxcsrOut = ResM.MXCSR;
5930 TestData.OutVal = ResM.uResult;
5931 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5932
5933 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
5934 IEMSSERESULT ResU; RT_ZERO(ResU);
5935 pfn(&State, &ResU, &TestData.InVal1, &TestData.r64Val2);
5936 TestData.fMxcsrIn = State.MXCSR;
5937 TestData.fMxcsrOut = ResU.MXCSR;
5938 TestData.OutVal = ResU.uResult;
5939 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5940
5941 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
5942 if (fXcpt)
5943 {
5944 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
5945 IEMSSERESULT Res1; RT_ZERO(Res1);
5946 pfn(&State, &Res1, &TestData.InVal1, &TestData.r64Val2);
5947 TestData.fMxcsrIn = State.MXCSR;
5948 TestData.fMxcsrOut = Res1.MXCSR;
5949 TestData.OutVal = Res1.uResult;
5950 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5951
5952 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
5953 {
5954 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
5955 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
5956 IEMSSERESULT Res2; RT_ZERO(Res2);
5957 pfn(&State, &Res2, &TestData.InVal1, &TestData.r64Val2);
5958 TestData.fMxcsrIn = State.MXCSR;
5959 TestData.fMxcsrOut = Res2.MXCSR;
5960 TestData.OutVal = Res2.uResult;
5961 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5962 }
5963 if (!RT_IS_POWER_OF_TWO(fXcpt))
5964 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
5965 if (fUnmasked & fXcpt)
5966 {
5967 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
5968 IEMSSERESULT Res3; RT_ZERO(Res3);
5969 pfn(&State, &Res3, &TestData.InVal1, &TestData.r64Val2);
5970 TestData.fMxcsrIn = State.MXCSR;
5971 TestData.fMxcsrOut = Res3.MXCSR;
5972 TestData.OutVal = Res3.uResult;
5973 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
5974 }
5975 }
5976 }
5977 }
5978 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
5979 }
5980
5981 return RTEXITCODE_SUCCESS;
5982}
5983#endif
5984
5985
5986static void SseBinaryU128R64Test(void)
5987{
5988 X86FXSTATE State;
5989 RT_ZERO(State);
5990 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryU128R64); iFn++)
5991 {
5992 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryU128R64[iFn]))
5993 continue;
5994
5995 SSE_BINARY_U128_R64_TEST_T const * const paTests = g_aSseBinaryU128R64[iFn].paTests;
5996 uint32_t const cTests = g_aSseBinaryU128R64[iFn].cTests;
5997 PFNIEMAIMPLFPSSEF2U128R64 pfn = g_aSseBinaryU128R64[iFn].pfn;
5998 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryU128R64[iFn]);
5999 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6000 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6001 {
6002 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6003 {
6004 IEMSSERESULT Res; RT_ZERO(Res);
6005
6006 State.MXCSR = paTests[iTest].fMxcsrIn;
6007 pfn(&State, &Res, &paTests[iTest].InVal1, &paTests[iTest].r64Val2);
6008 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
6009 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[0], &paTests[iTest].OutVal.ar64[0])
6010 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[1], &paTests[iTest].OutVal.ar64[1]))
6011 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s in2=%s\n"
6012 "%s -> mxcsr=%#08x %s'%s\n"
6013 "%s expected %#08x %s'%s%s%s (%s)\n",
6014 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6015 FormatR64(&paTests[iTest].InVal1.ar64[0]), FormatR64(&paTests[iTest].InVal1.ar64[1]),
6016 FormatR64(&paTests[iTest].r64Val2),
6017 iVar ? " " : "", Res.MXCSR,
6018 FormatR64(&Res.uResult.ar64[0]), FormatR64(&Res.uResult.ar64[1]),
6019 iVar ? " " : "", paTests[iTest].fMxcsrOut,
6020 FormatR64(&paTests[iTest].OutVal.ar64[0]), FormatR64(&paTests[iTest].OutVal.ar64[1]),
6021 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
6022 ( !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[0], &paTests[iTest].OutVal.ar64[0])
6023 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[1], &paTests[iTest].OutVal.ar64[1]))
6024 ? " - val" : "",
6025 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6026 }
6027 }
6028
6029 FREE_DECOMPRESSED_TESTS(g_aSseBinaryU128R64[iFn]);
6030 }
6031}
6032
6033
6034/*
6035 * SSE operations converting scalar double-precision floating point values to signed double-word integers (cvttsd2si and friends).
6036 */
6037TYPEDEF_SUBTEST_TYPE(SSE_BINARY_I32_R64_T, SSE_BINARY_I32_R64_TEST_T, PFNIEMAIMPLSSEF2I32U64);
6038
6039static SSE_BINARY_I32_R64_T g_aSseBinaryI32R64[] =
6040{
6041 ENTRY_BIN(cvttsd2si_i32_r64),
6042 ENTRY_BIN(cvtsd2si_i32_r64),
6043};
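/* Note: cvttsd2si always truncates (rounds toward zero) regardless of MXCSR.RC, while
   cvtsd2si honours the rounding control; the generator below still sweeps all four
   rounding modes for both so the difference is captured in the reference data. */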
6044
6045#ifdef TSTIEMAIMPL_WITH_GENERATOR
6046DUMP_ALL_FN(SseBinaryI32R64, g_aSseBinaryI32R64)
6047static RTEXITCODE SseBinaryI32R64Generate(uint32_t cTests, const char * const *papszNameFmts)
6048{
6049 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
6050
6051 static struct { RTFLOAT64U Val; } const s_aSpecials[] =
6052 {
6053 { RTFLOAT64U_INIT_C(0, 8388607, RTFLOAT64U_EXP_MAX - 1) },
6054 /** @todo More specials. */
6055 };
6056
6057 X86FXSTATE State;
6058 RT_ZERO(State);
6059 uint32_t cMinNormalPairs = (cTests - 144) / 4;
6060 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI32R64); iFn++)
6061 {
6062 PFNIEMAIMPLSSEF2I32U64 const pfn = g_aSseBinaryI32R64[iFn].pfnNative ? g_aSseBinaryI32R64[iFn].pfnNative : g_aSseBinaryI32R64[iFn].pfn;
6063
6064 IEMBINARYOUTPUT BinOut;
6065 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryI32R64[iFn]), RTEXITCODE_FAILURE);
6066
6067 uint32_t cNormalInputPairs = 0;
6068 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
6069 {
6070 SSE_BINARY_I32_R64_TEST_T TestData; RT_ZERO(TestData);
6071
6072 TestData.r64ValIn = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val;
6073
6074 if (RTFLOAT64U_IS_NORMAL(&TestData.r64ValIn))
6075 cNormalInputPairs++;
6076 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
6077 {
6078 iTest -= 1;
6079 continue;
6080 }
6081
6082 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
6083 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
6084 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
6085 for (uint8_t iFz = 0; iFz < 2; iFz++)
6086 {
6087 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
6088 | (iRounding << X86_MXCSR_RC_SHIFT)
6089 | (iDaz ? X86_MXCSR_DAZ : 0)
6090 | (iFz ? X86_MXCSR_FZ : 0)
6091 | X86_MXCSR_XCPT_MASK;
6092 uint32_t fMxcsrM; int32_t i32OutM;
6093 pfn(&State, &fMxcsrM, &i32OutM, &TestData.r64ValIn.u);
6094 TestData.fMxcsrIn = State.MXCSR;
6095 TestData.fMxcsrOut = fMxcsrM;
6096 TestData.i32ValOut = i32OutM;
6097 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6098
6099 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
6100 uint32_t fMxcsrU; int32_t i32OutU;
6101 pfn(&State, &fMxcsrU, &i32OutU, &TestData.r64ValIn.u);
6102 TestData.fMxcsrIn = State.MXCSR;
6103 TestData.fMxcsrOut = fMxcsrU;
6104 TestData.i32ValOut = i32OutU;
6105 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6106
6107 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
6108 if (fXcpt)
6109 {
6110 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
6111 uint32_t fMxcsr1; int32_t i32Out1;
6112 pfn(&State, &fMxcsr1, &i32Out1, &TestData.r64ValIn.u);
6113 TestData.fMxcsrIn = State.MXCSR;
6114 TestData.fMxcsrOut = fMxcsr1;
6115 TestData.i32ValOut = i32Out1;
6116 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6117
6118 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
6119 {
6120 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
6121 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
6122 uint32_t fMxcsr2; int32_t i32Out2;
6123 pfn(&State, &fMxcsr2, &i32Out2, &TestData.r64ValIn.u);
6124 TestData.fMxcsrIn = State.MXCSR;
6125 TestData.fMxcsrOut = fMxcsr2;
6126 TestData.i32ValOut = i32Out2;
6127 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6128 }
6129 if (!RT_IS_POWER_OF_TWO(fXcpt))
6130 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
6131 if (fUnmasked & fXcpt)
6132 {
6133 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
6134 uint32_t fMxcsr3; int32_t i32Out3;
6135 pfn(&State, &fMxcsr3, &i32Out3, &TestData.r64ValIn.u);
6136 TestData.fMxcsrIn = State.MXCSR;
6137 TestData.fMxcsrOut = fMxcsr3;
6138 TestData.i32ValOut = i32Out3;
6139 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6140 }
6141 }
6142 }
6143 }
6144 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
6145 }
6146
6147 return RTEXITCODE_SUCCESS;
6148}
6149#endif
6150
6151
6152static void SseBinaryI32R64Test(void)
6153{
6154 X86FXSTATE State;
6155 RT_ZERO(State);
6156 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI32R64); iFn++)
6157 {
6158 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryI32R64[iFn]))
6159 continue;
6160
6161 SSE_BINARY_I32_R64_TEST_T const * const paTests = g_aSseBinaryI32R64[iFn].paTests;
6162 uint32_t const cTests = g_aSseBinaryI32R64[iFn].cTests;
6163 PFNIEMAIMPLSSEF2I32U64 pfn = g_aSseBinaryI32R64[iFn].pfn;
6164 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryI32R64[iFn]);
6165 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6166 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6167 {
6168 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6169 {
6170 uint32_t fMxcsr = 0;
6171 int32_t i32Dst = 0;
6172
6173 State.MXCSR = paTests[iTest].fMxcsrIn;
6174 pfn(&State, &fMxcsr, &i32Dst, &paTests[iTest].r64ValIn.u);
6175 if ( fMxcsr != paTests[iTest].fMxcsrOut
6176 || i32Dst != paTests[iTest].i32ValOut)
6177 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s\n"
6178 "%s -> mxcsr=%#08x %RI32\n"
6179 "%s expected %#08x %RI32%s%s (%s)\n",
6180 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6181 FormatR64(&paTests[iTest].r64ValIn),
6182 iVar ? " " : "", fMxcsr, i32Dst,
6183 iVar ? " " : "", paTests[iTest].fMxcsrOut, paTests[iTest].i32ValOut,
6184 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
6185 i32Dst != paTests[iTest].i32ValOut
6186 ? " - val" : "",
6187 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6188 }
6189 }
6190
6191 FREE_DECOMPRESSED_TESTS(g_aSseBinaryI32R64[iFn]);
6192 }
6193}
6194
6195
6196/*
6197 * SSE operations converting scalar double-precision floating point values to signed quad-word integers (cvttsd2si and friends).
6198 */
6199TYPEDEF_SUBTEST_TYPE(SSE_BINARY_I64_R64_T, SSE_BINARY_I64_R64_TEST_T, PFNIEMAIMPLSSEF2I64U64);
6200
6201static SSE_BINARY_I64_R64_T g_aSseBinaryI64R64[] =
6202{
6203 ENTRY_BIN(cvttsd2si_i64_r64),
6204 ENTRY_BIN(cvtsd2si_i64_r64),
6205};
6206
6207#ifdef TSTIEMAIMPL_WITH_GENERATOR
6208DUMP_ALL_FN(SseBinaryI64R64, g_aSseBinaryI64R64)
6209static RTEXITCODE SseBinaryI64R64Generate(uint32_t cTests, const char * const *papszNameFmts)
6210{
6211 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
6212
6213 static struct { RTFLOAT64U Val; } const s_aSpecials[] =
6214 {
6215 { RTFLOAT64U_INIT_C(0, 8388607, RTFLOAT64U_EXP_MAX - 1) },
6216 /** @todo More specials. */
6217 };
6218
6219 X86FXSTATE State;
6220 RT_ZERO(State);
6221 uint32_t cMinNormalPairs = (cTests - 144) / 4;
6222 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI64R64); iFn++)
6223 {
6224 PFNIEMAIMPLSSEF2I64U64 const pfn = g_aSseBinaryI64R64[iFn].pfnNative ? g_aSseBinaryI64R64[iFn].pfnNative : g_aSseBinaryI64R64[iFn].pfn;
6225
6226 IEMBINARYOUTPUT BinOut;
6227 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryI64R64[iFn]), RTEXITCODE_FAILURE);
6228
6229 uint32_t cNormalInputPairs = 0;
6230 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
6231 {
6232 SSE_BINARY_I64_R64_TEST_T TestData; RT_ZERO(TestData);
6233
6234 TestData.r64ValIn = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val;
6235
6236 if (RTFLOAT64U_IS_NORMAL(&TestData.r64ValIn))
6237 cNormalInputPairs++;
6238 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
6239 {
6240 iTest -= 1;
6241 continue;
6242 }
6243
6244 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
6245 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
6246 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
6247 for (uint8_t iFz = 0; iFz < 2; iFz++)
6248 {
6249 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
6250 | (iRounding << X86_MXCSR_RC_SHIFT)
6251 | (iDaz ? X86_MXCSR_DAZ : 0)
6252 | (iFz ? X86_MXCSR_FZ : 0)
6253 | X86_MXCSR_XCPT_MASK;
6254 uint32_t fMxcsrM; int64_t i64OutM;
6255 pfn(&State, &fMxcsrM, &i64OutM, &TestData.r64ValIn.u);
6256 TestData.fMxcsrIn = State.MXCSR;
6257 TestData.fMxcsrOut = fMxcsrM;
6258 TestData.i64ValOut = i64OutM;
6259 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6260
6261 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
6262 uint32_t fMxcsrU; int64_t i64OutU;
6263 pfn(&State, &fMxcsrU, &i64OutU, &TestData.r64ValIn.u);
6264 TestData.fMxcsrIn = State.MXCSR;
6265 TestData.fMxcsrOut = fMxcsrU;
6266 TestData.i64ValOut = i64OutU;
6267 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6268
6269 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
6270 if (fXcpt)
6271 {
6272 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
6273 uint32_t fMxcsr1; int64_t i64Out1;
6274 pfn(&State, &fMxcsr1, &i64Out1, &TestData.r64ValIn.u);
6275 TestData.fMxcsrIn = State.MXCSR;
6276 TestData.fMxcsrOut = fMxcsr1;
6277 TestData.i64ValOut = i64Out1;
6278 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6279
6280 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
6281 {
6282 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
6283 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
6284 uint32_t fMxcsr2; int64_t i64Out2;
6285 pfn(&State, &fMxcsr2, &i64Out2, &TestData.r64ValIn.u);
6286 TestData.fMxcsrIn = State.MXCSR;
6287 TestData.fMxcsrOut = fMxcsr2;
6288 TestData.i64ValOut = i64Out2;
6289 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6290 }
6291 if (!RT_IS_POWER_OF_TWO(fXcpt))
6292 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
6293 if (fUnmasked & fXcpt)
6294 {
6295 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
6296 uint32_t fMxcsr3; int64_t i64Out3;
6297 pfn(&State, &fMxcsr3, &i64Out3, &TestData.r64ValIn.u);
6298 TestData.fMxcsrIn = State.MXCSR;
6299 TestData.fMxcsrOut = fMxcsr3;
6300 TestData.i64ValOut = i64Out3;
6301 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6302 }
6303 }
6304 }
6305 }
6306 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
6307 }
6308
6309 return RTEXITCODE_SUCCESS;
6310}
6311#endif
6312
6313
6314static void SseBinaryI64R64Test(void)
6315{
6316 X86FXSTATE State;
6317 RT_ZERO(State);
6318 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI64R64); iFn++)
6319 {
6320 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryI64R64[iFn]))
6321 continue;
6322
6323 SSE_BINARY_I64_R64_TEST_T const * const paTests = g_aSseBinaryI64R64[iFn].paTests;
6324 uint32_t const cTests = g_aSseBinaryI64R64[iFn].cTests;
6325 PFNIEMAIMPLSSEF2I64U64 pfn = g_aSseBinaryI64R64[iFn].pfn;
6326 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryI64R64[iFn]);
6327 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6328 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6329 {
6330 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6331 {
6332 uint32_t fMxcsr = 0;
6333 int64_t i64Dst = 0;
6334
6335 State.MXCSR = paTests[iTest].fMxcsrIn;
6336 pfn(&State, &fMxcsr, &i64Dst, &paTests[iTest].r64ValIn.u);
6337 if ( fMxcsr != paTests[iTest].fMxcsrOut
6338 || i64Dst != paTests[iTest].i64ValOut)
6339 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s\n"
6340 "%s -> mxcsr=%#08x %RI64\n"
6341 "%s expected %#08x %RI64%s%s (%s)\n",
6342 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6343 FormatR64(&paTests[iTest].r64ValIn),
6344 iVar ? " " : "", fMxcsr, i64Dst,
6345 iVar ? " " : "", paTests[iTest].fMxcsrOut, paTests[iTest].i64ValOut,
6346 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
6347 i64Dst != paTests[iTest].i64ValOut
6348 ? " - val" : "",
6349 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6350 }
6351 }
6352
6353 FREE_DECOMPRESSED_TESTS(g_aSseBinaryI64R64[iFn]);
6354 }
6355}
6356
6357
6358/*
6359 * SSE operations converting scalar single-precision floating point values to signed double-word integers (cvttss2si and friends).
6360 */
6361TYPEDEF_SUBTEST_TYPE(SSE_BINARY_I32_R32_T, SSE_BINARY_I32_R32_TEST_T, PFNIEMAIMPLSSEF2I32U32);
6362
6363static SSE_BINARY_I32_R32_T g_aSseBinaryI32R32[] =
6364{
6365 ENTRY_BIN(cvttss2si_i32_r32),
6366 ENTRY_BIN(cvtss2si_i32_r32),
6367};
6368
6369#ifdef TSTIEMAIMPL_WITH_GENERATOR
6370DUMP_ALL_FN(SseBinaryI32R32, g_aSseBinaryI32R32)
6371static RTEXITCODE SseBinaryI32R32Generate(uint32_t cTests, const char * const *papszNameFmts)
6372{
6373 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
6374
6375 static struct { RTFLOAT32U Val; } const s_aSpecials[] =
6376 {
6377 { RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1) },
6378 /** @todo More specials. */
6379 };
6380
6381 X86FXSTATE State;
6382 RT_ZERO(State);
6383 uint32_t cMinNormalPairs = (cTests - 144) / 4;
6384 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI32R32); iFn++)
6385 {
6386 PFNIEMAIMPLSSEF2I32U32 const pfn = g_aSseBinaryI32R32[iFn].pfnNative ? g_aSseBinaryI32R32[iFn].pfnNative : g_aSseBinaryI32R32[iFn].pfn;
6387
6388 IEMBINARYOUTPUT BinOut;
6389 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryI32R32[iFn]), RTEXITCODE_FAILURE);
6390
6391 uint32_t cNormalInputPairs = 0;
6392 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
6393 {
6394 SSE_BINARY_I32_R32_TEST_T TestData; RT_ZERO(TestData);
6395
6396 TestData.r32ValIn = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val;
6397
6398 if (RTFLOAT32U_IS_NORMAL(&TestData.r32ValIn))
6399 cNormalInputPairs++;
6400 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
6401 {
6402 iTest -= 1;
6403 continue;
6404 }
6405
6406 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
6407 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
6408 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
6409 for (uint8_t iFz = 0; iFz < 2; iFz++)
6410 {
6411 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
6412 | (iRounding << X86_MXCSR_RC_SHIFT)
6413 | (iDaz ? X86_MXCSR_DAZ : 0)
6414 | (iFz ? X86_MXCSR_FZ : 0)
6415 | X86_MXCSR_XCPT_MASK;
6416 uint32_t fMxcsrM; int32_t i32OutM;
6417 pfn(&State, &fMxcsrM, &i32OutM, &TestData.r32ValIn.u);
6418 TestData.fMxcsrIn = State.MXCSR;
6419 TestData.fMxcsrOut = fMxcsrM;
6420 TestData.i32ValOut = i32OutM;
6421 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6422
6423 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
6424 uint32_t fMxcsrU; int32_t i32OutU;
6425 pfn(&State, &fMxcsrU, &i32OutU, &TestData.r32ValIn.u);
6426 TestData.fMxcsrIn = State.MXCSR;
6427 TestData.fMxcsrOut = fMxcsrU;
6428 TestData.i32ValOut = i32OutU;
6429 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6430
6431 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
6432 if (fXcpt)
6433 {
6434 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
6435 uint32_t fMxcsr1; int32_t i32Out1;
6436 pfn(&State, &fMxcsr1, &i32Out1, &TestData.r32ValIn.u);
6437 TestData.fMxcsrIn = State.MXCSR;
6438 TestData.fMxcsrOut = fMxcsr1;
6439 TestData.i32ValOut = i32Out1;
6440 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6441
6442 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
6443 {
6444 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
6445 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
6446 uint32_t fMxcsr2; int32_t i32Out2;
6447 pfn(&State, &fMxcsr2, &i32Out2, &TestData.r32ValIn.u);
6448 TestData.fMxcsrIn = State.MXCSR;
6449 TestData.fMxcsrOut = fMxcsr2;
6450 TestData.i32ValOut = i32Out2;
6451 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6452 }
6453 if (!RT_IS_POWER_OF_TWO(fXcpt))
6454 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
6455 if (fUnmasked & fXcpt)
6456 {
6457 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
6458 uint32_t fMxcsr3; int32_t i32Out3;
6459 pfn(&State, &fMxcsr3, &i32Out3, &TestData.r32ValIn.u);
6460 TestData.fMxcsrIn = State.MXCSR;
6461 TestData.fMxcsrOut = fMxcsr3;
6462 TestData.i32ValOut = i32Out3;
6463 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6464 }
6465 }
6466 }
6467 }
6468 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
6469 }
6470
6471 return RTEXITCODE_SUCCESS;
6472}
6473#endif
6474
6475
6476static void SseBinaryI32R32Test(void)
6477{
6478 X86FXSTATE State;
6479 RT_ZERO(State);
6480 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI32R32); iFn++)
6481 {
6482 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryI32R32[iFn]))
6483 continue;
6484
6485 SSE_BINARY_I32_R32_TEST_T const * const paTests = g_aSseBinaryI32R32[iFn].paTests;
6486 uint32_t const cTests = g_aSseBinaryI32R32[iFn].cTests;
6487 PFNIEMAIMPLSSEF2I32U32 pfn = g_aSseBinaryI32R32[iFn].pfn;
6488 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryI32R32[iFn]);
6489 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6490 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6491 {
6492 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6493 {
6494 uint32_t fMxcsr = 0;
6495 int32_t i32Dst = 0;
6496
6497 State.MXCSR = paTests[iTest].fMxcsrIn;
6498 pfn(&State, &fMxcsr, &i32Dst, &paTests[iTest].r32ValIn.u);
6499 if ( fMxcsr != paTests[iTest].fMxcsrOut
6500 || i32Dst != paTests[iTest].i32ValOut)
6501 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s\n"
6502 "%s -> mxcsr=%#08x %RI32\n"
6503 "%s expected %#08x %RI32%s%s (%s)\n",
6504 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6505 FormatR32(&paTests[iTest].r32ValIn),
6506 iVar ? " " : "", fMxcsr, i32Dst,
6507 iVar ? " " : "", paTests[iTest].fMxcsrOut, paTests[iTest].i32ValOut,
6508 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
6509 i32Dst != paTests[iTest].i32ValOut
6510 ? " - val" : "",
6511 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6512 }
6513 }
6514
6515 FREE_DECOMPRESSED_TESTS(g_aSseBinaryI32R32[iFn]);
6516 }
6517}
6518
6519
6520/*
6521 * SSE operations converting scalar single-precision floating point values to signed quad-word integers (cvttss2si and friends).
6522 */
6523TYPEDEF_SUBTEST_TYPE(SSE_BINARY_I64_R32_T, SSE_BINARY_I64_R32_TEST_T, PFNIEMAIMPLSSEF2I64U32);
6524
6525static SSE_BINARY_I64_R32_T g_aSseBinaryI64R32[] =
6526{
6527 ENTRY_BIN(cvttss2si_i64_r32),
6528 ENTRY_BIN(cvtss2si_i64_r32),
6529};
6530
6531#ifdef TSTIEMAIMPL_WITH_GENERATOR
6532DUMP_ALL_FN(SseBinaryI64R32, g_aSseBinaryI64R32)
6533static RTEXITCODE SseBinaryI64R32Generate(uint32_t cTests, const char * const *papszNameFmts)
6534{
6535 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
6536
6537 static struct { RTFLOAT32U Val; } const s_aSpecials[] =
6538 {
6539 { RTFLOAT32U_INIT_C(0, 8388607, RTFLOAT32U_EXP_MAX - 1) },
6540 /** @todo More specials. */
6541 };
6542
6543 X86FXSTATE State;
6544 RT_ZERO(State);
6545 uint32_t cMinNormalPairs = (cTests - 144) / 4;
6546 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI64R32); iFn++)
6547 {
6548 PFNIEMAIMPLSSEF2I64U32 const pfn = g_aSseBinaryI64R32[iFn].pfnNative ? g_aSseBinaryI64R32[iFn].pfnNative : g_aSseBinaryI64R32[iFn].pfn;
6549
6550 IEMBINARYOUTPUT BinOut;
6551 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryI64R32[iFn]), RTEXITCODE_FAILURE);
6552
6553 uint32_t cNormalInputPairs = 0;
6554 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
6555 {
6556 SSE_BINARY_I64_R32_TEST_T TestData; RT_ZERO(TestData);
6557
6558 TestData.r32ValIn = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val;
6559
6560 if (RTFLOAT32U_IS_NORMAL(&TestData.r32ValIn))
6561 cNormalInputPairs++;
6562 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
6563 {
6564 iTest -= 1;
6565 continue;
6566 }
6567
6568 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
6569 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
6570 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
6571 for (uint8_t iFz = 0; iFz < 2; iFz++)
6572 {
6573 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
6574 | (iRounding << X86_MXCSR_RC_SHIFT)
6575 | (iDaz ? X86_MXCSR_DAZ : 0)
6576 | (iFz ? X86_MXCSR_FZ : 0)
6577 | X86_MXCSR_XCPT_MASK;
6578 uint32_t fMxcsrM; int64_t i64OutM;
6579 pfn(&State, &fMxcsrM, &i64OutM, &TestData.r32ValIn.u);
6580 TestData.fMxcsrIn = State.MXCSR;
6581 TestData.fMxcsrOut = fMxcsrM;
6582 TestData.i64ValOut = i64OutM;
6583 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6584
6585 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
6586 uint32_t fMxcsrU; int64_t i64OutU;
6587 pfn(&State, &fMxcsrU, &i64OutU, &TestData.r32ValIn.u);
6588 TestData.fMxcsrIn = State.MXCSR;
6589 TestData.fMxcsrOut = fMxcsrU;
6590 TestData.i64ValOut = i64OutU;
6591 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6592
6593 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
6594 if (fXcpt)
6595 {
6596 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
6597 uint32_t fMxcsr1; int64_t i64Out1;
6598 pfn(&State, &fMxcsr1, &i64Out1, &TestData.r32ValIn.u);
6599 TestData.fMxcsrIn = State.MXCSR;
6600 TestData.fMxcsrOut = fMxcsr1;
6601 TestData.i64ValOut = i64Out1;
6602 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6603
6604 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
6605 {
6606 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
6607 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
6608 uint32_t fMxcsr2; int64_t i64Out2;
6609 pfn(&State, &fMxcsr2, &i64Out2, &TestData.r32ValIn.u);
6610 TestData.fMxcsrIn = State.MXCSR;
6611 TestData.fMxcsrOut = fMxcsr2;
6612 TestData.i64ValOut = i64Out2;
6613 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6614 }
6615 if (!RT_IS_POWER_OF_TWO(fXcpt))
6616 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
6617 if (fUnmasked & fXcpt)
6618 {
6619 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
6620 uint32_t fMxcsr3; int64_t i64Out3;
6621 pfn(&State, &fMxcsr3, &i64Out3, &TestData.r32ValIn.u);
6622 TestData.fMxcsrIn = State.MXCSR;
6623 TestData.fMxcsrOut = fMxcsr3;
6624 TestData.i64ValOut = i64Out3;
6625 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6626 }
6627 }
6628 }
6629 }
6630 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
6631 }
6632
6633 return RTEXITCODE_SUCCESS;
6634}
6635#endif
6636
6637
6638static void SseBinaryI64R32Test(void)
6639{
6640 X86FXSTATE State;
6641 RT_ZERO(State);
6642 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryI64R32); iFn++)
6643 {
6644 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryI64R32[iFn]))
6645 continue;
6646
6647 SSE_BINARY_I64_R32_TEST_T const * const paTests = g_aSseBinaryI64R32[iFn].paTests;
6648 uint32_t const cTests = g_aSseBinaryI64R32[iFn].cTests;
6649 PFNIEMAIMPLSSEF2I64U32 pfn = g_aSseBinaryI64R32[iFn].pfn;
6650 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryI64R32[iFn]);
6651 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6652 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6653 {
6654 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6655 {
6656 uint32_t fMxcsr = 0;
6657 int64_t i64Dst = 0;
6658
6659 State.MXCSR = paTests[iTest].fMxcsrIn;
6660 pfn(&State, &fMxcsr, &i64Dst, &paTests[iTest].r32ValIn.u);
6661 if ( fMxcsr != paTests[iTest].fMxcsrOut
6662 || i64Dst != paTests[iTest].i64ValOut)
6663 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s\n"
6664 "%s -> mxcsr=%#08x %RI64\n"
6665 "%s expected %#08x %RI64%s%s (%s)\n",
6666 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6667 FormatR32(&paTests[iTest].r32ValIn),
6668 iVar ? " " : "", fMxcsr, i64Dst,
6669 iVar ? " " : "", paTests[iTest].fMxcsrOut, paTests[iTest].i64ValOut,
6670 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
6671 i64Dst != paTests[iTest].i64ValOut
6672 ? " - val" : "",
6673 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6674 }
6675 }
6676
6677 FREE_DECOMPRESSED_TESTS(g_aSseBinaryI64R32[iFn]);
6678 }
6679}
6680
6681
6682/*
6683 * SSE operations converting signed double-word integers to scalar double-precision floating point values (cvtsi2sd).
6684 */
6685TYPEDEF_SUBTEST_TYPE(SSE_BINARY_R64_I32_T, SSE_BINARY_R64_I32_TEST_T, PFNIEMAIMPLSSEF2R64I32);
6686
6687static SSE_BINARY_R64_I32_T g_aSseBinaryR64I32[] =
6688{
6689 ENTRY_BIN(cvtsi2sd_r64_i32)
6690};
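/* Note: every int32_t is exactly representable as a double, so cvtsi2sd with a 32-bit
   source cannot raise any SSE exception; the exception-driven re-runs in the generator
   below are consequently never taken for this table. */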
6691
6692#ifdef TSTIEMAIMPL_WITH_GENERATOR
6693DUMP_ALL_FN(SseBinaryR64I32, g_aSseBinaryR64I32)
6694static RTEXITCODE SseBinaryR64I32Generate(uint32_t cTests, const char * const *papszNameFmts)
6695{
6696 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
6697
6698 static int32_t const s_aSpecials[] =
6699 {
6700 INT32_MIN,
6701 INT32_MAX,
6702 /** @todo More specials. */
6703 };
6704
6705 X86FXSTATE State;
6706 RT_ZERO(State);
6707 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR64I32); iFn++)
6708 {
6709 PFNIEMAIMPLSSEF2R64I32 const pfn = g_aSseBinaryR64I32[iFn].pfnNative ? g_aSseBinaryR64I32[iFn].pfnNative : g_aSseBinaryR64I32[iFn].pfn;
6710
6711 IEMBINARYOUTPUT BinOut;
6712 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryR64I32[iFn]), RTEXITCODE_FAILURE);
6713
6714 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
6715 {
6716 SSE_BINARY_R64_I32_TEST_T TestData; RT_ZERO(TestData);
6717
6718 TestData.i32ValIn = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
6719
6720 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
6721 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
6722 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
6723 for (uint8_t iFz = 0; iFz < 2; iFz++)
6724 {
6725 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
6726 | (iRounding << X86_MXCSR_RC_SHIFT)
6727 | (iDaz ? X86_MXCSR_DAZ : 0)
6728 | (iFz ? X86_MXCSR_FZ : 0)
6729 | X86_MXCSR_XCPT_MASK;
6730 uint32_t fMxcsrM; RTFLOAT64U r64OutM;
6731 pfn(&State, &fMxcsrM, &r64OutM, &TestData.i32ValIn);
6732 TestData.fMxcsrIn = State.MXCSR;
6733 TestData.fMxcsrOut = fMxcsrM;
6734 TestData.r64ValOut = r64OutM;
6735 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6736
6737 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
6738 uint32_t fMxcsrU; RTFLOAT64U r64OutU;
6739 pfn(&State, &fMxcsrU, &r64OutU, &TestData.i32ValIn);
6740 TestData.fMxcsrIn = State.MXCSR;
6741 TestData.fMxcsrOut = fMxcsrU;
6742 TestData.r64ValOut = r64OutU;
6743 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6744
6745 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
6746 if (fXcpt)
6747 {
6748 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
6749 uint32_t fMxcsr1; RTFLOAT64U r64Out1;
6750 pfn(&State, &fMxcsr1, &r64Out1, &TestData.i32ValIn);
6751 TestData.fMxcsrIn = State.MXCSR;
6752 TestData.fMxcsrOut = fMxcsr1;
6753 TestData.r64ValOut = r64Out1;
6754 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6755
6756 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
6757 {
6758 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
6759 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
6760 uint32_t fMxcsr2; RTFLOAT64U r64Out2;
6761 pfn(&State, &fMxcsr2, &r64Out2, &TestData.i32ValIn);
6762 TestData.fMxcsrIn = State.MXCSR;
6763 TestData.fMxcsrOut = fMxcsr2;
6764 TestData.r64ValOut = r64Out2;
6765 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6766 }
6767 if (!RT_IS_POWER_OF_TWO(fXcpt))
6768 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
6769 if (fUnmasked & fXcpt)
6770 {
6771 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
6772 uint32_t fMxcsr3; RTFLOAT64U r64Out3;
6773 pfn(&State, &fMxcsr3, &r64Out3, &TestData.i32ValIn);
6774 TestData.fMxcsrIn = State.MXCSR;
6775 TestData.fMxcsrOut = fMxcsr3;
6776 TestData.r64ValOut = r64Out3;
6777 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6778 }
6779 }
6780 }
6781 }
6782 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
6783 }
6784
6785 return RTEXITCODE_SUCCESS;
6786}
6787#endif
6788
6789
6790static void SseBinaryR64I32Test(void)
6791{
6792 X86FXSTATE State;
6793 RT_ZERO(State);
6794 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR64I32); iFn++)
6795 {
6796 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryR64I32[iFn]))
6797 continue;
6798
6799 SSE_BINARY_R64_I32_TEST_T const * const paTests = g_aSseBinaryR64I32[iFn].paTests;
6800 uint32_t const cTests = g_aSseBinaryR64I32[iFn].cTests;
6801 PFNIEMAIMPLSSEF2R64I32 pfn = g_aSseBinaryR64I32[iFn].pfn;
6802 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryR64I32[iFn]);
6803 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6804 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6805 {
6806 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6807 {
6808 uint32_t fMxcsr = 0;
6809 RTFLOAT64U r64Dst; RT_ZERO(r64Dst);
6810
6811 State.MXCSR = paTests[iTest].fMxcsrIn;
6812 pfn(&State, &fMxcsr, &r64Dst, &paTests[iTest].i32ValIn);
6813 if ( fMxcsr != paTests[iTest].fMxcsrOut
6814 || !RTFLOAT64U_ARE_IDENTICAL(&r64Dst, &paTests[iTest].r64ValOut))
6815 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI32\n"
6816 "%s -> mxcsr=%#08x %s\n"
6817 "%s expected %#08x %s%s%s (%s)\n",
6818 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6819 paTests[iTest].i32ValIn,
6820 iVar ? " " : "", fMxcsr, FormatR64(&r64Dst),
6821 iVar ? " " : "", paTests[iTest].fMxcsrOut, FormatR64(&paTests[iTest].r64ValOut),
6822 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
6823 !RTFLOAT64U_ARE_IDENTICAL(&r64Dst, &paTests[iTest].r64ValOut)
6824 ? " - val" : "",
6825 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6826 }
6827 }
6828
6829 FREE_DECOMPRESSED_TESTS(g_aSseBinaryR64I32[iFn]);
6830 }
6831}
6832
6833
6834/*
6835 * SSE operations converting signed quad-word integers to scalar double-precision floating point values (cvtsi2sd).
6836 */
6837TYPEDEF_SUBTEST_TYPE(SSE_BINARY_R64_I64_T, SSE_BINARY_R64_I64_TEST_T, PFNIEMAIMPLSSEF2R64I64);
6838
6839static SSE_BINARY_R64_I64_T g_aSseBinaryR64I64[] =
6840{
6841 ENTRY_BIN(cvtsi2sd_r64_i64),
6842};
6843
6844#ifdef TSTIEMAIMPL_WITH_GENERATOR
6845DUMP_ALL_FN(SseBinaryR64I64, g_aSseBinaryR64I64)
6846static RTEXITCODE SseBinaryR64I64Generate(uint32_t cTests, const char * const *papszNameFmts)
6847{
6848 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
6849
6850 static int64_t const s_aSpecials[] =
6851 {
6852 INT64_MIN,
6853 INT64_MAX
6854 /** @todo More specials. */
6855 };
6856
6857 X86FXSTATE State;
6858 RT_ZERO(State);
6859 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR64I64); iFn++)
6860 {
6861 PFNIEMAIMPLSSEF2R64I64 const pfn = g_aSseBinaryR64I64[iFn].pfnNative ? g_aSseBinaryR64I64[iFn].pfnNative : g_aSseBinaryR64I64[iFn].pfn;
6862
6863 IEMBINARYOUTPUT BinOut;
6864 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryR64I64[iFn]), RTEXITCODE_FAILURE);
6865
6866 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
6867 {
6868 SSE_BINARY_R64_I64_TEST_T TestData; RT_ZERO(TestData);
6869
6870 TestData.i64ValIn = iTest < cTests ? RandI64Src(iTest) : s_aSpecials[iTest - cTests];
6871
6872 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
6873 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
6874 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
6875 for (uint8_t iFz = 0; iFz < 2; iFz++)
6876 {
6877 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
6878 | (iRounding << X86_MXCSR_RC_SHIFT)
6879 | (iDaz ? X86_MXCSR_DAZ : 0)
6880 | (iFz ? X86_MXCSR_FZ : 0)
6881 | X86_MXCSR_XCPT_MASK;
6882 uint32_t fMxcsrM; RTFLOAT64U r64OutM;
6883 pfn(&State, &fMxcsrM, &r64OutM, &TestData.i64ValIn);
6884 TestData.fMxcsrIn = State.MXCSR;
6885 TestData.fMxcsrOut = fMxcsrM;
6886 TestData.r64ValOut = r64OutM;
6887 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6888
6889 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
6890 uint32_t fMxcsrU; RTFLOAT64U r64OutU;
6891 pfn(&State, &fMxcsrU, &r64OutU, &TestData.i64ValIn);
6892 TestData.fMxcsrIn = State.MXCSR;
6893 TestData.fMxcsrOut = fMxcsrU;
6894 TestData.r64ValOut = r64OutU;
6895 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6896
6897 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
6898 if (fXcpt)
6899 {
6900 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
6901 uint32_t fMxcsr1; RTFLOAT64U r64Out1;
6902 pfn(&State, &fMxcsr1, &r64Out1, &TestData.i64ValIn);
6903 TestData.fMxcsrIn = State.MXCSR;
6904 TestData.fMxcsrOut = fMxcsr1;
6905 TestData.r64ValOut = r64Out1;
6906 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6907
6908 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
6909 {
6910 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
6911 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
6912 uint32_t fMxcsr2; RTFLOAT64U r64Out2;
6913 pfn(&State, &fMxcsr2, &r64Out2, &TestData.i64ValIn);
6914 TestData.fMxcsrIn = State.MXCSR;
6915 TestData.fMxcsrOut = fMxcsr2;
6916 TestData.r64ValOut = r64Out2;
6917 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6918 }
6919 if (!RT_IS_POWER_OF_TWO(fXcpt))
6920 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
6921 if (fUnmasked & fXcpt)
6922 {
6923 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
6924 uint32_t fMxcsr3; RTFLOAT64U r64Out3;
6925 pfn(&State, &fMxcsr3, &r64Out3, &TestData.i64ValIn);
6926 TestData.fMxcsrIn = State.MXCSR;
6927 TestData.fMxcsrOut = fMxcsr3;
6928 TestData.r64ValOut = r64Out3;
6929 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
6930 }
6931 }
6932 }
6933 }
6934 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
6935 }
6936
6937 return RTEXITCODE_SUCCESS;
6938}
6939#endif
6940
6941
6942static void SseBinaryR64I64Test(void)
6943{
6944 X86FXSTATE State;
6945 RT_ZERO(State);
6946 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR64I64); iFn++)
6947 {
6948 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryR64I64[iFn]))
6949 continue;
6950
6951 SSE_BINARY_R64_I64_TEST_T const * const paTests = g_aSseBinaryR64I64[iFn].paTests;
6952 uint32_t const cTests = g_aSseBinaryR64I64[iFn].cTests;
6953 PFNIEMAIMPLSSEF2R64I64 pfn = g_aSseBinaryR64I64[iFn].pfn;
6954 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryR64I64[iFn]);
6955 if (!cTests) RTTestSkipped(g_hTest, "no tests");
6956 for (uint32_t iVar = 0; iVar < cVars; iVar++)
6957 {
6958 for (uint32_t iTest = 0; iTest < cTests; iTest++)
6959 {
6960 uint32_t fMxcsr = 0;
6961 RTFLOAT64U r64Dst; RT_ZERO(r64Dst);
6962
6963 State.MXCSR = paTests[iTest].fMxcsrIn;
6964 pfn(&State, &fMxcsr, &r64Dst, &paTests[iTest].i64ValIn);
6965 if ( fMxcsr != paTests[iTest].fMxcsrOut
6966 || !RTFLOAT64U_ARE_IDENTICAL(&r64Dst, &paTests[iTest].r64ValOut))
6967 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI64\n"
6968 "%s -> mxcsr=%#08x %s\n"
6969 "%s expected %#08x %s%s%s (%s)\n",
6970 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
6971 paTests[iTest].i64ValIn,
6972 iVar ? " " : "", fMxcsr, FormatR64(&r64Dst),
6973 iVar ? " " : "", paTests[iTest].fMxcsrOut, FormatR64(&paTests[iTest].r64ValOut),
6974 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
6975 !RTFLOAT64U_ARE_IDENTICAL(&r64Dst, &paTests[iTest].r64ValOut)
6976 ? " - val" : "",
6977 FormatMxcsr(paTests[iTest].fMxcsrIn) );
6978 }
6979 }
6980
6981 FREE_DECOMPRESSED_TESTS(g_aSseBinaryR64I64[iFn]);
6982 }
6983}
6984
6985
6986/*
6987 * SSE operations converting signed double-word integers to scalar single-precision floating point values (cvtsi2ss).
6988 */
6989TYPEDEF_SUBTEST_TYPE(SSE_BINARY_R32_I32_T, SSE_BINARY_R32_I32_TEST_T, PFNIEMAIMPLSSEF2R32I32);
6990
6991static SSE_BINARY_R32_I32_T g_aSseBinaryR32I32[] =
6992{
6993 ENTRY_BIN(cvtsi2ss_r32_i32),
6994};
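/* Note: unlike the int32 to double case, int32 to float conversions can be inexact, so
   the precision exception and the exception-driven re-runs in the generator below do
   come into play here. */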
6995
6996#ifdef TSTIEMAIMPL_WITH_GENERATOR
6997DUMP_ALL_FN(SseBinaryR32I32, g_aSseBinaryR32I32)
6998static RTEXITCODE SseBinaryR32I32Generate(uint32_t cTests, const char * const *papszNameFmts)
6999{
7000 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
7001
7002 static int32_t const s_aSpecials[] =
7003 {
7004 INT32_MIN,
7005 INT32_MAX,
7006 /** @todo More specials. */
7007 };
7008
7009 X86FXSTATE State;
7010 RT_ZERO(State);
7011 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR32I32); iFn++)
7012 {
7013 PFNIEMAIMPLSSEF2R32I32 const pfn = g_aSseBinaryR32I32[iFn].pfnNative ? g_aSseBinaryR32I32[iFn].pfnNative : g_aSseBinaryR32I32[iFn].pfn;
7014
7015 IEMBINARYOUTPUT BinOut;
7016 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryR32I32[iFn]), RTEXITCODE_FAILURE);
7017
7018 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
7019 {
7020 SSE_BINARY_R32_I32_TEST_T TestData; RT_ZERO(TestData);
7021
7022 TestData.i32ValIn = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
7023
7024 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
7025 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
7026 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
7027 for (uint8_t iFz = 0; iFz < 2; iFz++)
7028 {
7029 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
7030 | (iRounding << X86_MXCSR_RC_SHIFT)
7031 | (iDaz ? X86_MXCSR_DAZ : 0)
7032 | (iFz ? X86_MXCSR_FZ : 0)
7033 | X86_MXCSR_XCPT_MASK;
7034 uint32_t fMxcsrM; RTFLOAT32U r32OutM;
7035 pfn(&State, &fMxcsrM, &r32OutM, &TestData.i32ValIn);
7036 TestData.fMxcsrIn = State.MXCSR;
7037 TestData.fMxcsrOut = fMxcsrM;
7038 TestData.r32ValOut = r32OutM;
7039 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7040
7041 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
7042 uint32_t fMxcsrU; RTFLOAT32U r32OutU;
7043 pfn(&State, &fMxcsrU, &r32OutU, &TestData.i32ValIn);
7044 TestData.fMxcsrIn = State.MXCSR;
7045 TestData.fMxcsrOut = fMxcsrU;
7046 TestData.r32ValOut = r32OutU;
7047 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7048
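                         /* If the masked and/or unmasked runs raised exception flags, replay the
                            input with those flags already set (sticky behaviour) and then with
                            each raised exception individually unmasked. */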
7049 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
7050 if (fXcpt)
7051 {
7052 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
7053 uint32_t fMxcsr1; RTFLOAT32U r32Out1;
7054 pfn(&State, &fMxcsr1, &r32Out1, &TestData.i32ValIn);
7055 TestData.fMxcsrIn = State.MXCSR;
7056 TestData.fMxcsrOut = fMxcsr1;
7057 TestData.r32ValOut = r32Out1;
7058 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7059
7060 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
7061 {
7062 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
7063 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
7064 uint32_t fMxcsr2; RTFLOAT32U r32Out2;
7065 pfn(&State, &fMxcsr2, &r32Out2, &TestData.i32ValIn);
7066 TestData.fMxcsrIn = State.MXCSR;
7067 TestData.fMxcsrOut = fMxcsr2;
7068 TestData.r32ValOut = r32Out2;
7069 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7070 }
7071 if (!RT_IS_POWER_OF_TWO(fXcpt))
7072 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
7073 if (fUnmasked & fXcpt)
7074 {
7075 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
7076 uint32_t fMxcsr3; RTFLOAT32U r32Out3;
7077 pfn(&State, &fMxcsr3, &r32Out3, &TestData.i32ValIn);
7078 TestData.fMxcsrIn = State.MXCSR;
7079 TestData.fMxcsrOut = fMxcsr3;
7080 TestData.r32ValOut = r32Out3;
7081 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7082 }
7083 }
7084 }
7085 }
7086 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
7087 }
7088
7089 return RTEXITCODE_SUCCESS;
7090}
7091#endif
7092
7093
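/**
 * Replays the recorded cvtsi2ss (r32 <- i32) test data, checking that the
 * resulting MXCSR and the single-precision output are identical to the
 * recorded values for every enabled implementation variation.
 */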
7094static void SseBinaryR32I32Test(void)
7095{
7096 X86FXSTATE State;
7097 RT_ZERO(State);
7098 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR32I32); iFn++)
7099 {
7100 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryR32I32[iFn]))
7101 continue;
7102
7103 SSE_BINARY_R32_I32_TEST_T const * const paTests = g_aSseBinaryR32I32[iFn].paTests;
7104 uint32_t const cTests = g_aSseBinaryR32I32[iFn].cTests;
7105 PFNIEMAIMPLSSEF2R32I32 pfn = g_aSseBinaryR32I32[iFn].pfn;
7106 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryR32I32[iFn]);
7107 if (!cTests) RTTestSkipped(g_hTest, "no tests");
7108 for (uint32_t iVar = 0; iVar < cVars; iVar++)
7109 {
7110 for (uint32_t iTest = 0; iTest < cTests; iTest++)
7111 {
7112 uint32_t fMxcsr = 0;
7113 RTFLOAT32U r32Dst; RT_ZERO(r32Dst);
7114
7115 State.MXCSR = paTests[iTest].fMxcsrIn;
7116 pfn(&State, &fMxcsr, &r32Dst, &paTests[iTest].i32ValIn);
7117 if ( fMxcsr != paTests[iTest].fMxcsrOut
7118 || !RTFLOAT32U_ARE_IDENTICAL(&r32Dst, &paTests[iTest].r32ValOut))
 7119                     RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI32\n"
 7120                                           "%s -> mxcsr=%#08x %s\n"
 7121                                           "%s expected %#08x %s%s%s (%s)\n",
 7122                                  iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
 7123                                  paTests[iTest].i32ValIn,
7124 iVar ? " " : "", fMxcsr, FormatR32(&r32Dst),
7125 iVar ? " " : "", paTests[iTest].fMxcsrOut, FormatR32(&paTests[iTest].r32ValOut),
7126 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
7127 !RTFLOAT32U_ARE_IDENTICAL(&r32Dst, &paTests[iTest].r32ValOut)
7128 ? " - val" : "",
7129 FormatMxcsr(paTests[iTest].fMxcsrIn) );
7130 }
7131 }
7132
7133 FREE_DECOMPRESSED_TESTS(g_aSseBinaryR32I32[iFn]);
7134 }
7135}
7136
7137
7138/*
7139 * SSE operations converting single signed quad-word integers to single-precision floating point values (probably only cvtsi2ss).
7140 */
7141TYPEDEF_SUBTEST_TYPE(SSE_BINARY_R32_I64_T, SSE_BINARY_R32_I64_TEST_T, PFNIEMAIMPLSSEF2R32I64);
7142
7143static SSE_BINARY_R32_I64_T g_aSseBinaryR32I64[] =
7144{
7145 ENTRY_BIN(cvtsi2ss_r32_i64),
7146};
7147
7148#ifdef TSTIEMAIMPL_WITH_GENERATOR
7149DUMP_ALL_FN(SseBinaryR32I64, g_aSseBinaryR32I64)
7150static RTEXITCODE SseBinaryR32I64Generate(uint32_t cTests, const char * const *papszNameFmts)
7151{
7152 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
7153
7154 static int64_t const s_aSpecials[] =
7155 {
7156 INT64_MIN,
7157 INT64_MAX
7158 /** @todo More specials. */
7159 };
7160
7161 X86FXSTATE State;
7162 RT_ZERO(State);
7163 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR32I64); iFn++)
7164 {
7165 PFNIEMAIMPLSSEF2R32I64 const pfn = g_aSseBinaryR32I64[iFn].pfnNative ? g_aSseBinaryR32I64[iFn].pfnNative : g_aSseBinaryR32I64[iFn].pfn;
7166
7167 IEMBINARYOUTPUT BinOut;
7168 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseBinaryR32I64[iFn]), RTEXITCODE_FAILURE);
7169
7170 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
7171 {
7172 SSE_BINARY_R32_I64_TEST_T TestData; RT_ZERO(TestData);
7173
7174 TestData.i64ValIn = iTest < cTests ? RandI64Src(iTest) : s_aSpecials[iTest - cTests];
7175
7176 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
7177 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
7178 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
7179 for (uint8_t iFz = 0; iFz < 2; iFz++)
7180 {
7181 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
7182 | (iRounding << X86_MXCSR_RC_SHIFT)
7183 | (iDaz ? X86_MXCSR_DAZ : 0)
7184 | (iFz ? X86_MXCSR_FZ : 0)
7185 | X86_MXCSR_XCPT_MASK;
7186 uint32_t fMxcsrM; RTFLOAT32U r32OutM;
7187 pfn(&State, &fMxcsrM, &r32OutM, &TestData.i64ValIn);
7188 TestData.fMxcsrIn = State.MXCSR;
7189 TestData.fMxcsrOut = fMxcsrM;
7190 TestData.r32ValOut = r32OutM;
7191 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7192
7193 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
7194 uint32_t fMxcsrU; RTFLOAT32U r32OutU;
7195 pfn(&State, &fMxcsrU, &r32OutU, &TestData.i64ValIn);
7196 TestData.fMxcsrIn = State.MXCSR;
7197 TestData.fMxcsrOut = fMxcsrU;
7198 TestData.r32ValOut = r32OutU;
7199 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7200
7201 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
7202 if (fXcpt)
7203 {
7204 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
7205 uint32_t fMxcsr1; RTFLOAT32U r32Out1;
7206 pfn(&State, &fMxcsr1, &r32Out1, &TestData.i64ValIn);
7207 TestData.fMxcsrIn = State.MXCSR;
7208 TestData.fMxcsrOut = fMxcsr1;
7209 TestData.r32ValOut = r32Out1;
7210 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7211
7212 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
7213 {
7214 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
7215 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
7216 uint32_t fMxcsr2; RTFLOAT32U r32Out2;
7217 pfn(&State, &fMxcsr2, &r32Out2, &TestData.i64ValIn);
7218 TestData.fMxcsrIn = State.MXCSR;
7219 TestData.fMxcsrOut = fMxcsr2;
7220 TestData.r32ValOut = r32Out2;
7221 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7222 }
7223 if (!RT_IS_POWER_OF_TWO(fXcpt))
7224 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
7225 if (fUnmasked & fXcpt)
7226 {
7227 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
7228 uint32_t fMxcsr3; RTFLOAT32U r32Out3;
7229 pfn(&State, &fMxcsr3, &r32Out3, &TestData.i64ValIn);
7230 TestData.fMxcsrIn = State.MXCSR;
7231 TestData.fMxcsrOut = fMxcsr3;
7232 TestData.r32ValOut = r32Out3;
7233 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7234 }
7235 }
7236 }
7237 }
7238 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
7239 }
7240
7241 return RTEXITCODE_SUCCESS;
7242}
7243#endif
7244
7245
7246static void SseBinaryR32I64Test(void)
7247{
7248 X86FXSTATE State;
7249 RT_ZERO(State);
7250 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseBinaryR32I64); iFn++)
7251 {
7252 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseBinaryR32I64[iFn]))
7253 continue;
7254
7255 SSE_BINARY_R32_I64_TEST_T const * const paTests = g_aSseBinaryR32I64[iFn].paTests;
7256 uint32_t const cTests = g_aSseBinaryR32I64[iFn].cTests;
7257 PFNIEMAIMPLSSEF2R32I64 pfn = g_aSseBinaryR32I64[iFn].pfn;
7258 uint32_t const cVars = COUNT_VARIATIONS(g_aSseBinaryR32I64[iFn]);
7259 if (!cTests) RTTestSkipped(g_hTest, "no tests");
7260 for (uint32_t iVar = 0; iVar < cVars; iVar++)
7261 {
7262 for (uint32_t iTest = 0; iTest < cTests; iTest++)
7263 {
7264 uint32_t fMxcsr = 0;
7265 RTFLOAT32U r32Dst; RT_ZERO(r32Dst);
7266
7267 State.MXCSR = paTests[iTest].fMxcsrIn;
7268 pfn(&State, &fMxcsr, &r32Dst, &paTests[iTest].i64ValIn);
7269 if ( fMxcsr != paTests[iTest].fMxcsrOut
7270 || !RTFLOAT32U_ARE_IDENTICAL(&r32Dst, &paTests[iTest].r32ValOut))
 7271                     RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI64\n"
 7272                                           "%s -> mxcsr=%#08x %s\n"
 7273                                           "%s expected %#08x %s%s%s (%s)\n",
 7274                                  iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
 7275                                  paTests[iTest].i64ValIn,
7276 iVar ? " " : "", fMxcsr, FormatR32(&r32Dst),
7277 iVar ? " " : "", paTests[iTest].fMxcsrOut, FormatR32(&paTests[iTest].r32ValOut),
7278 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
7279 !RTFLOAT32U_ARE_IDENTICAL(&r32Dst, &paTests[iTest].r32ValOut)
7280 ? " - val" : "",
7281 FormatMxcsr(paTests[iTest].fMxcsrIn) );
7282 }
7283 }
7284
7285 FREE_DECOMPRESSED_TESTS(g_aSseBinaryR32I64[iFn]);
7286 }
7287}
7288
7289
7290/*
7291 * Compare SSE operations on single single-precision floating point values - outputting only EFLAGS.
7292 */
7293TYPEDEF_SUBTEST_TYPE(SSE_COMPARE_EFL_R32_R32_T, SSE_COMPARE_EFL_R32_R32_TEST_T, PFNIEMAIMPLF2EFLMXCSR128);
7294
7295static SSE_COMPARE_EFL_R32_R32_T g_aSseCompareEflR32R32[] =
7296{
7297 ENTRY_BIN(ucomiss_u128),
7298 ENTRY_BIN(comiss_u128),
7299 ENTRY_BIN_AVX(vucomiss_u128),
7300 ENTRY_BIN_AVX(vcomiss_u128),
7301};
7302
7303#ifdef TSTIEMAIMPL_WITH_GENERATOR
7304DUMP_ALL_FN(SseCompareEflR32R32, g_aSseCompareEflR32R32)
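/**
 * Generates test data for the scalar single-precision compares that report
 * their result in EFLAGS (ucomiss/comiss and the AVX variants).
 *
 * Besides the usual MXCSR rounding/DAZ/FZ/exception sweep, each input pair is
 * combined with a random starting EFLAGS value, so the recorded data also
 * captures how the implementation updates the flags register.
 */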
7305static RTEXITCODE SseCompareEflR32R32Generate(uint32_t cTests, const char * const *papszNameFmts)
7306{
7307 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
7308
7309 static struct { RTFLOAT32U Val1, Val2; } const s_aSpecials[] =
7310 {
7311 { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0) },
7312 { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(1) },
7313 { RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(0) },
7314 { RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(1) },
7315 { RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(0) },
7316 { RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(1) },
7317 { RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(0) },
7318 { RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(1) },
7319 /** @todo More specials. */
7320 };
7321
7322 uint32_t cMinNormalPairs = (cTests - 144) / 4;
7323 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareEflR32R32); iFn++)
7324 {
7325 PFNIEMAIMPLF2EFLMXCSR128 const pfn = g_aSseCompareEflR32R32[iFn].pfnNative ? g_aSseCompareEflR32R32[iFn].pfnNative : g_aSseCompareEflR32R32[iFn].pfn;
7326
7327 IEMBINARYOUTPUT BinOut;
7328 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseCompareEflR32R32[iFn]), RTEXITCODE_FAILURE);
7329
7330 uint32_t cNormalInputPairs = 0;
7331 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
7332 {
7333 SSE_COMPARE_EFL_R32_R32_TEST_T TestData; RT_ZERO(TestData);
7334 X86XMMREG ValIn1; RT_ZERO(ValIn1);
7335 X86XMMREG ValIn2; RT_ZERO(ValIn2);
7336
7337 TestData.r32ValIn1 = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7338 TestData.r32ValIn2 = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7339
7340 ValIn1.ar32[0] = TestData.r32ValIn1;
7341 ValIn2.ar32[0] = TestData.r32ValIn2;
7342
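            /* Make sure at least cMinNormalPairs of the random pairs consist of two
               normal values; towards the end of the run non-normal inputs are
               discarded and re-rolled. */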
7343 if ( RTFLOAT32U_IS_NORMAL(&TestData.r32ValIn1)
7344 && RTFLOAT32U_IS_NORMAL(&TestData.r32ValIn2))
7345 cNormalInputPairs++;
7346 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
7347 {
7348 iTest -= 1;
7349 continue;
7350 }
7351
7352 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
7353 uint32_t const fEFlags = RandEFlags();
7354 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
7355 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
7356 for (uint8_t iFz = 0; iFz < 2; iFz++)
7357 {
7358 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
7359 | (iRounding << X86_MXCSR_RC_SHIFT)
7360 | (iDaz ? X86_MXCSR_DAZ : 0)
7361 | (iFz ? X86_MXCSR_FZ : 0)
7362 | X86_MXCSR_XCPT_MASK;
7363 uint32_t fMxcsrM = fMxcsrIn;
7364 uint32_t fEFlagsM = fEFlags;
7365 pfn(&fMxcsrM, &fEFlagsM, &ValIn1, &ValIn2);
7366 TestData.fMxcsrIn = fMxcsrIn;
7367 TestData.fMxcsrOut = fMxcsrM;
7368 TestData.fEflIn = fEFlags;
7369 TestData.fEflOut = fEFlagsM;
7370 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7371
7372 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
7373 uint32_t fMxcsrU = fMxcsrIn;
7374 uint32_t fEFlagsU = fEFlags;
7375 pfn(&fMxcsrU, &fEFlagsU, &ValIn1, &ValIn2);
7376 TestData.fMxcsrIn = fMxcsrIn;
7377 TestData.fMxcsrOut = fMxcsrU;
7378 TestData.fEflIn = fEFlags;
7379 TestData.fEflOut = fEFlagsU;
7380 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7381
7382 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
7383 if (fXcpt)
7384 {
7385 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
7386 uint32_t fMxcsr1 = fMxcsrIn;
7387 uint32_t fEFlags1 = fEFlags;
7388 pfn(&fMxcsr1, &fEFlags1, &ValIn1, &ValIn2);
7389 TestData.fMxcsrIn = fMxcsrIn;
7390 TestData.fMxcsrOut = fMxcsr1;
7391 TestData.fEflIn = fEFlags;
7392 TestData.fEflOut = fEFlags1;
7393 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7394
7395 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
7396 {
7397 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
7398 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
7399 uint32_t fMxcsr2 = fMxcsrIn;
7400 uint32_t fEFlags2 = fEFlags;
7401 pfn(&fMxcsr2, &fEFlags2, &ValIn1, &ValIn2);
7402 TestData.fMxcsrIn = fMxcsrIn;
7403 TestData.fMxcsrOut = fMxcsr2;
7404 TestData.fEflIn = fEFlags;
7405 TestData.fEflOut = fEFlags2;
7406 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7407 }
7408 if (!RT_IS_POWER_OF_TWO(fXcpt))
7409 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
7410 if (fUnmasked & fXcpt)
7411 {
7412 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
7413 uint32_t fMxcsr3 = fMxcsrIn;
7414 uint32_t fEFlags3 = fEFlags;
7415 pfn(&fMxcsr3, &fEFlags3, &ValIn1, &ValIn2);
7416 TestData.fMxcsrIn = fMxcsrIn;
7417 TestData.fMxcsrOut = fMxcsr3;
7418 TestData.fEflIn = fEFlags;
7419 TestData.fEflOut = fEFlags3;
7420 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7421 }
7422 }
7423 }
7424 }
7425 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
7426 }
7427
7428 return RTEXITCODE_SUCCESS;
7429}
7430#endif
7431
7432static void SseCompareEflR32R32Test(void)
7433{
7434 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareEflR32R32); iFn++)
7435 {
7436 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseCompareEflR32R32[iFn]))
7437 continue;
7438
7439 SSE_COMPARE_EFL_R32_R32_TEST_T const * const paTests = g_aSseCompareEflR32R32[iFn].paTests;
7440 uint32_t const cTests = g_aSseCompareEflR32R32[iFn].cTests;
7441 PFNIEMAIMPLF2EFLMXCSR128 pfn = g_aSseCompareEflR32R32[iFn].pfn;
7442 uint32_t const cVars = COUNT_VARIATIONS(g_aSseCompareEflR32R32[iFn]);
7443 if (!cTests) RTTestSkipped(g_hTest, "no tests");
7444 for (uint32_t iVar = 0; iVar < cVars; iVar++)
7445 {
7446 for (uint32_t iTest = 0; iTest < cTests; iTest++)
7447 {
7448 X86XMMREG ValIn1; RT_ZERO(ValIn1);
7449 X86XMMREG ValIn2; RT_ZERO(ValIn2);
7450
7451 ValIn1.ar32[0] = paTests[iTest].r32ValIn1;
7452 ValIn2.ar32[0] = paTests[iTest].r32ValIn2;
7453 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
7454 uint32_t fEFlags = paTests[iTest].fEflIn;
7455 pfn(&fMxcsr, &fEFlags, &ValIn1, &ValIn2);
7456 if ( fMxcsr != paTests[iTest].fMxcsrOut
7457 || fEFlags != paTests[iTest].fEflOut)
7458 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x efl=%#08x in1=%s in2=%s\n"
7459 "%s -> mxcsr=%#08x %#08x\n"
7460 "%s expected %#08x %#08x%s (%s) (EFL: %s)\n",
7461 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn, paTests[iTest].fEflIn,
7462 FormatR32(&paTests[iTest].r32ValIn1), FormatR32(&paTests[iTest].r32ValIn2),
7463 iVar ? " " : "", fMxcsr, fEFlags,
7464 iVar ? " " : "", paTests[iTest].fMxcsrOut, paTests[iTest].fEflOut,
7465 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
7466 FormatMxcsr(paTests[iTest].fMxcsrIn),
7467 EFlagsDiff(fEFlags, paTests[iTest].fEflOut));
7468 }
7469 }
7470
7471 FREE_DECOMPRESSED_TESTS(g_aSseCompareEflR32R32[iFn]);
7472 }
7473}
7474
7475
7476/*
 7477 * Compare SSE operations on single double-precision floating point values - outputting only EFLAGS.
7478 */
7479TYPEDEF_SUBTEST_TYPE(SSE_COMPARE_EFL_R64_R64_T, SSE_COMPARE_EFL_R64_R64_TEST_T, PFNIEMAIMPLF2EFLMXCSR128);
7480
7481static SSE_COMPARE_EFL_R64_R64_T g_aSseCompareEflR64R64[] =
7482{
7483 ENTRY_BIN(ucomisd_u128),
7484 ENTRY_BIN(comisd_u128),
7485 ENTRY_BIN_AVX(vucomisd_u128),
7486 ENTRY_BIN_AVX(vcomisd_u128)
7487};
7488
7489#ifdef TSTIEMAIMPL_WITH_GENERATOR
7490DUMP_ALL_FN(SseCompareEflR64R64, g_aSseCompareEflR64R64)
7491static RTEXITCODE SseCompareEflR64R64Generate(uint32_t cTests, const char * const *papszNameFmts)
7492{
7493 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
7494
7495 static struct { RTFLOAT64U Val1, Val2; } const s_aSpecials[] =
7496 {
7497 { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(0) },
7498 { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(1) },
7499 { RTFLOAT64U_INIT_ZERO(1), RTFLOAT64U_INIT_ZERO(0) },
7500 { RTFLOAT64U_INIT_ZERO(1), RTFLOAT64U_INIT_ZERO(1) },
7501 { RTFLOAT64U_INIT_INF(0), RTFLOAT64U_INIT_INF(0) },
7502 { RTFLOAT64U_INIT_INF(0), RTFLOAT64U_INIT_INF(1) },
7503 { RTFLOAT64U_INIT_INF(1), RTFLOAT64U_INIT_INF(0) },
7504 { RTFLOAT64U_INIT_INF(1), RTFLOAT64U_INIT_INF(1) },
7505 /** @todo More specials. */
7506 };
7507
7508 uint32_t cMinNormalPairs = (cTests - 144) / 4;
7509 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareEflR64R64); iFn++)
7510 {
7511 PFNIEMAIMPLF2EFLMXCSR128 const pfn = g_aSseCompareEflR64R64[iFn].pfnNative ? g_aSseCompareEflR64R64[iFn].pfnNative : g_aSseCompareEflR64R64[iFn].pfn;
7512
7513 IEMBINARYOUTPUT BinOut;
7514 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseCompareEflR64R64[iFn]), RTEXITCODE_FAILURE);
7515
7516 uint32_t cNormalInputPairs = 0;
7517 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
7518 {
7519 SSE_COMPARE_EFL_R64_R64_TEST_T TestData; RT_ZERO(TestData);
7520 X86XMMREG ValIn1; RT_ZERO(ValIn1);
7521 X86XMMREG ValIn2; RT_ZERO(ValIn2);
7522
7523 TestData.r64ValIn1 = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7524 TestData.r64ValIn2 = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7525
7526 ValIn1.ar64[0] = TestData.r64ValIn1;
7527 ValIn2.ar64[0] = TestData.r64ValIn2;
7528
7529 if ( RTFLOAT64U_IS_NORMAL(&TestData.r64ValIn1)
7530 && RTFLOAT64U_IS_NORMAL(&TestData.r64ValIn2))
7531 cNormalInputPairs++;
7532 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
7533 {
7534 iTest -= 1;
7535 continue;
7536 }
7537
7538 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
7539 uint32_t const fEFlags = RandEFlags();
7540 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
7541 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
7542 for (uint8_t iFz = 0; iFz < 2; iFz++)
7543 {
7544 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
7545 | (iRounding << X86_MXCSR_RC_SHIFT)
7546 | (iDaz ? X86_MXCSR_DAZ : 0)
7547 | (iFz ? X86_MXCSR_FZ : 0)
7548 | X86_MXCSR_XCPT_MASK;
7549 uint32_t fMxcsrM = fMxcsrIn;
7550 uint32_t fEFlagsM = fEFlags;
7551 pfn(&fMxcsrM, &fEFlagsM, &ValIn1, &ValIn2);
7552 TestData.fMxcsrIn = fMxcsrIn;
7553 TestData.fMxcsrOut = fMxcsrM;
7554 TestData.fEflIn = fEFlags;
7555 TestData.fEflOut = fEFlagsM;
7556 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7557
7558 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
7559 uint32_t fMxcsrU = fMxcsrIn;
7560 uint32_t fEFlagsU = fEFlags;
7561 pfn(&fMxcsrU, &fEFlagsU, &ValIn1, &ValIn2);
7562 TestData.fMxcsrIn = fMxcsrIn;
7563 TestData.fMxcsrOut = fMxcsrU;
7564 TestData.fEflIn = fEFlags;
7565 TestData.fEflOut = fEFlagsU;
7566 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7567
7568 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
7569 if (fXcpt)
7570 {
7571 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
7572 uint32_t fMxcsr1 = fMxcsrIn;
7573 uint32_t fEFlags1 = fEFlags;
7574 pfn(&fMxcsr1, &fEFlags1, &ValIn1, &ValIn2);
7575 TestData.fMxcsrIn = fMxcsrIn;
7576 TestData.fMxcsrOut = fMxcsr1;
7577 TestData.fEflIn = fEFlags;
7578 TestData.fEflOut = fEFlags1;
7579 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7580
7581 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
7582 {
7583 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
7584 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
7585 uint32_t fMxcsr2 = fMxcsrIn;
7586 uint32_t fEFlags2 = fEFlags;
7587 pfn(&fMxcsr2, &fEFlags2, &ValIn1, &ValIn2);
7588 TestData.fMxcsrIn = fMxcsrIn;
7589 TestData.fMxcsrOut = fMxcsr2;
7590 TestData.fEflIn = fEFlags;
7591 TestData.fEflOut = fEFlags2;
7592 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7593 }
7594 if (!RT_IS_POWER_OF_TWO(fXcpt))
7595 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
7596 if (fUnmasked & fXcpt)
7597 {
7598 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
7599 uint32_t fMxcsr3 = fMxcsrIn;
7600 uint32_t fEFlags3 = fEFlags;
7601 pfn(&fMxcsr3, &fEFlags3, &ValIn1, &ValIn2);
7602 TestData.fMxcsrIn = fMxcsrIn;
7603 TestData.fMxcsrOut = fMxcsr3;
7604 TestData.fEflIn = fEFlags;
7605 TestData.fEflOut = fEFlags3;
7606 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7607 }
7608 }
7609 }
7610 }
7611 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
7612 }
7613
7614 return RTEXITCODE_SUCCESS;
7615}
7616#endif
7617
7618static void SseCompareEflR64R64Test(void)
7619{
7620 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareEflR64R64); iFn++)
7621 {
7622 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseCompareEflR64R64[iFn]))
7623 continue;
7624
7625 SSE_COMPARE_EFL_R64_R64_TEST_T const * const paTests = g_aSseCompareEflR64R64[iFn].paTests;
7626 uint32_t const cTests = g_aSseCompareEflR64R64[iFn].cTests;
7627 PFNIEMAIMPLF2EFLMXCSR128 pfn = g_aSseCompareEflR64R64[iFn].pfn;
7628 uint32_t const cVars = COUNT_VARIATIONS(g_aSseCompareEflR64R64[iFn]);
7629 if (!cTests) RTTestSkipped(g_hTest, "no tests");
7630 for (uint32_t iVar = 0; iVar < cVars; iVar++)
7631 {
7632 for (uint32_t iTest = 0; iTest < cTests; iTest++)
7633 {
7634 X86XMMREG ValIn1; RT_ZERO(ValIn1);
7635 X86XMMREG ValIn2; RT_ZERO(ValIn2);
7636
7637 ValIn1.ar64[0] = paTests[iTest].r64ValIn1;
7638 ValIn2.ar64[0] = paTests[iTest].r64ValIn2;
7639 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
7640 uint32_t fEFlags = paTests[iTest].fEflIn;
7641 pfn(&fMxcsr, &fEFlags, &ValIn1, &ValIn2);
7642 if ( fMxcsr != paTests[iTest].fMxcsrOut
7643 || fEFlags != paTests[iTest].fEflOut)
7644 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x efl=%#08x in1=%s in2=%s\n"
7645 "%s -> mxcsr=%#08x %#08x\n"
7646 "%s expected %#08x %#08x%s (%s) (EFL: %s)\n",
7647 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn, paTests[iTest].fEflIn,
7648 FormatR64(&paTests[iTest].r64ValIn1), FormatR64(&paTests[iTest].r64ValIn2),
7649 iVar ? " " : "", fMxcsr, fEFlags,
7650 iVar ? " " : "", paTests[iTest].fMxcsrOut, paTests[iTest].fEflOut,
7651 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
7652 FormatMxcsr(paTests[iTest].fMxcsrIn),
7653 EFlagsDiff(fEFlags, paTests[iTest].fEflOut));
7654 }
7655 }
7656
7657 FREE_DECOMPRESSED_TESTS(g_aSseCompareEflR64R64[iFn]);
7658 }
7659}
7660
7661
7662/*
7663 * Compare SSE operations on packed and single single-precision floating point values - outputting a mask.
7664 */
 7665/** Maximum immediate to try, to keep the test data size under control (at least a little bit). */
7666#define SSE_COMPARE_F2_XMM_IMM8_MAX 0x1f
7667
7668TYPEDEF_SUBTEST_TYPE(SSE_COMPARE_F2_XMM_IMM8_T, SSE_COMPARE_F2_XMM_IMM8_TEST_T, PFNIEMAIMPLMXCSRF2XMMIMM8);
7669
7670static SSE_COMPARE_F2_XMM_IMM8_T g_aSseCompareF2XmmR32Imm8[] =
7671{
7672 ENTRY_BIN(cmpps_u128),
7673 ENTRY_BIN(cmpss_u128)
7674};
7675
7676#ifdef TSTIEMAIMPL_WITH_GENERATOR
7677DUMP_ALL_FN(SseCompareF2XmmR32Imm8, g_aSseCompareF2XmmR32Imm8)
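/**
 * Generates test data for cmpps/cmpss.
 *
 * In addition to the usual MXCSR sweep, every compare predicate immediate
 * from 0 up to SSE_COMPARE_F2_XMM_IMM8_MAX is exercised for each input pair,
 * which is why the immediate range is capped to keep the data files small.
 */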
7678static RTEXITCODE SseCompareF2XmmR32Imm8Generate(uint32_t cTests, const char * const *papszNameFmts)
7679{
7680 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
7681
7682 static struct { RTFLOAT32U Val1, Val2; } const s_aSpecials[] =
7683 {
7684 { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0) },
7685 { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(1) },
7686 { RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(0) },
7687 { RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(1) },
7688 { RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(0) },
7689 { RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(1) },
7690 { RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(0) },
7691 { RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(1) },
7692 /** @todo More specials. */
7693 };
7694
7695 uint32_t cMinNormalPairs = (cTests - 144) / 4;
7696 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareF2XmmR32Imm8); iFn++)
7697 {
7698 PFNIEMAIMPLMXCSRF2XMMIMM8 const pfn = g_aSseCompareF2XmmR32Imm8[iFn].pfnNative ? g_aSseCompareF2XmmR32Imm8[iFn].pfnNative : g_aSseCompareF2XmmR32Imm8[iFn].pfn;
7699
7700 IEMBINARYOUTPUT BinOut;
7701 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseCompareF2XmmR32Imm8[iFn]), RTEXITCODE_FAILURE);
7702
7703 uint32_t cNormalInputPairs = 0;
7704 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
7705 {
7706 SSE_COMPARE_F2_XMM_IMM8_TEST_T TestData; RT_ZERO(TestData);
7707
7708 TestData.InVal1.ar32[0] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7709 TestData.InVal1.ar32[1] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7710 TestData.InVal1.ar32[2] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7711 TestData.InVal1.ar32[3] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7712
7713 TestData.InVal2.ar32[0] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7714 TestData.InVal2.ar32[1] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7715 TestData.InVal2.ar32[2] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7716 TestData.InVal2.ar32[3] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7717
7718 if ( RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[0])
7719 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[1])
7720 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[2])
7721 && RTFLOAT32U_IS_NORMAL(&TestData.InVal1.ar32[3])
7722 && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[0])
7723 && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[1])
7724 && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[2])
7725 && RTFLOAT32U_IS_NORMAL(&TestData.InVal2.ar32[3]))
7726 cNormalInputPairs++;
7727 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
7728 {
7729 iTest -= 1;
7730 continue;
7731 }
7732
7733 IEMMEDIAF2XMMSRC Src;
7734 Src.uSrc1 = TestData.InVal1;
7735 Src.uSrc2 = TestData.InVal2;
7736 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
7737 for (uint8_t bImm = 0; bImm <= SSE_COMPARE_F2_XMM_IMM8_MAX; bImm++)
7738 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
7739 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
7740 for (uint8_t iFz = 0; iFz < 2; iFz++)
7741 {
7742 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
7743 | (iRounding << X86_MXCSR_RC_SHIFT)
7744 | (iDaz ? X86_MXCSR_DAZ : 0)
7745 | (iFz ? X86_MXCSR_FZ : 0)
7746 | X86_MXCSR_XCPT_MASK;
7747 uint32_t fMxcsrM = fMxcsrIn;
7748 X86XMMREG ResM;
7749 pfn(&fMxcsrM, &ResM, &Src, bImm);
7750 TestData.fMxcsrIn = fMxcsrIn;
7751 TestData.fMxcsrOut = fMxcsrM;
7752 TestData.bImm = bImm;
7753 TestData.OutVal = ResM;
7754 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7755
7756 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
7757 uint32_t fMxcsrU = fMxcsrIn;
7758 X86XMMREG ResU;
7759 pfn(&fMxcsrU, &ResU, &Src, bImm);
7760 TestData.fMxcsrIn = fMxcsrIn;
7761 TestData.fMxcsrOut = fMxcsrU;
7762 TestData.bImm = bImm;
7763 TestData.OutVal = ResU;
7764 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7765
7766 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
7767 if (fXcpt)
7768 {
7769 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
7770 uint32_t fMxcsr1 = fMxcsrIn;
7771 X86XMMREG Res1;
7772 pfn(&fMxcsr1, &Res1, &Src, bImm);
7773 TestData.fMxcsrIn = fMxcsrIn;
7774 TestData.fMxcsrOut = fMxcsr1;
7775 TestData.bImm = bImm;
7776 TestData.OutVal = Res1;
7777 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7778
7779 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
7780 {
7781 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
7782 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
7783 uint32_t fMxcsr2 = fMxcsrIn;
7784 X86XMMREG Res2;
7785 pfn(&fMxcsr2, &Res2, &Src, bImm);
7786 TestData.fMxcsrIn = fMxcsrIn;
7787 TestData.fMxcsrOut = fMxcsr2;
7788 TestData.bImm = bImm;
7789 TestData.OutVal = Res2;
7790 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7791 }
7792 if (!RT_IS_POWER_OF_TWO(fXcpt))
7793 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
7794 if (fUnmasked & fXcpt)
7795 {
7796 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
7797 uint32_t fMxcsr3 = fMxcsrIn;
7798 X86XMMREG Res3;
7799 pfn(&fMxcsr3, &Res3, &Src, bImm);
7800 TestData.fMxcsrIn = fMxcsrIn;
7801 TestData.fMxcsrOut = fMxcsr3;
7802 TestData.bImm = bImm;
7803 TestData.OutVal = Res3;
7804 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7805 }
7806 }
7807 }
7808 }
7809 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
7810 }
7811
7812 return RTEXITCODE_SUCCESS;
7813}
7814#endif
7815
7816static void SseCompareF2XmmR32Imm8Test(void)
7817{
7818 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareF2XmmR32Imm8); iFn++)
7819 {
7820 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseCompareF2XmmR32Imm8[iFn]))
7821 continue;
7822
7823 SSE_COMPARE_F2_XMM_IMM8_TEST_T const * const paTests = g_aSseCompareF2XmmR32Imm8[iFn].paTests;
7824 uint32_t const cTests = g_aSseCompareF2XmmR32Imm8[iFn].cTests;
7825 PFNIEMAIMPLMXCSRF2XMMIMM8 pfn = g_aSseCompareF2XmmR32Imm8[iFn].pfn;
7826 uint32_t const cVars = COUNT_VARIATIONS(g_aSseCompareF2XmmR32Imm8[iFn]);
7827 if (!cTests) RTTestSkipped(g_hTest, "no tests");
7828 for (uint32_t iVar = 0; iVar < cVars; iVar++)
7829 {
7830 for (uint32_t iTest = 0; iTest < cTests; iTest++)
7831 {
7832 IEMMEDIAF2XMMSRC Src;
7833 X86XMMREG ValOut;
7834
7835 Src.uSrc1 = paTests[iTest].InVal1;
7836 Src.uSrc2 = paTests[iTest].InVal2;
7837 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
7838 pfn(&fMxcsr, &ValOut, &Src, paTests[iTest].bImm);
7839 if ( fMxcsr != paTests[iTest].fMxcsrOut
7840 || ValOut.au32[0] != paTests[iTest].OutVal.au32[0]
7841 || ValOut.au32[1] != paTests[iTest].OutVal.au32[1]
7842 || ValOut.au32[2] != paTests[iTest].OutVal.au32[2]
7843 || ValOut.au32[3] != paTests[iTest].OutVal.au32[3])
7844 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s'%s'%s in2=%s'%s'%s'%s imm8=%x\n"
7845 "%s -> mxcsr=%#08x %RX32'%RX32'%RX32'%RX32\n"
7846 "%s expected %#08x %RX32'%RX32'%RX32'%RX32%s%s (%s)\n",
7847 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
7848 FormatR32(&paTests[iTest].InVal1.ar32[0]), FormatR32(&paTests[iTest].InVal1.ar32[1]),
7849 FormatR32(&paTests[iTest].InVal1.ar32[2]), FormatR32(&paTests[iTest].InVal1.ar32[3]),
7850 FormatR32(&paTests[iTest].InVal2.ar32[0]), FormatR32(&paTests[iTest].InVal2.ar32[1]),
7851 FormatR32(&paTests[iTest].InVal2.ar32[2]), FormatR32(&paTests[iTest].InVal2.ar32[3]),
7852 paTests[iTest].bImm,
7853 iVar ? " " : "", fMxcsr, ValOut.au32[0], ValOut.au32[1], ValOut.au32[2], ValOut.au32[3],
7854 iVar ? " " : "", paTests[iTest].fMxcsrOut,
7855 paTests[iTest].OutVal.au32[0], paTests[iTest].OutVal.au32[1],
7856 paTests[iTest].OutVal.au32[2], paTests[iTest].OutVal.au32[3],
7857 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
7858 ( ValOut.au32[0] != paTests[iTest].OutVal.au32[0]
7859 || ValOut.au32[1] != paTests[iTest].OutVal.au32[1]
7860 || ValOut.au32[2] != paTests[iTest].OutVal.au32[2]
7861 || ValOut.au32[3] != paTests[iTest].OutVal.au32[3])
7862 ? " - val" : "",
7863 FormatMxcsr(paTests[iTest].fMxcsrIn));
7864 }
7865 }
7866
7867 FREE_DECOMPRESSED_TESTS(g_aSseCompareF2XmmR32Imm8[iFn]);
7868 }
7869}
7870
7871
7872/*
7873 * Compare SSE operations on packed and single double-precision floating point values - outputting a mask.
7874 */
7875static SSE_COMPARE_F2_XMM_IMM8_T g_aSseCompareF2XmmR64Imm8[] =
7876{
7877 ENTRY_BIN(cmppd_u128),
7878 ENTRY_BIN(cmpsd_u128)
7879};
7880
7881#ifdef TSTIEMAIMPL_WITH_GENERATOR
7882DUMP_ALL_FN(SseCompareF2XmmR64Imm8, g_aSseCompareF2XmmR64Imm8)
7883static RTEXITCODE SseCompareF2XmmR64Imm8Generate(uint32_t cTests, const char * const *papszNameFmts)
7884{
7885 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
7886
7887 static struct { RTFLOAT64U Val1, Val2; } const s_aSpecials[] =
7888 {
7889 { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(0) },
7890 { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(1) },
7891 { RTFLOAT64U_INIT_ZERO(1), RTFLOAT64U_INIT_ZERO(0) },
7892 { RTFLOAT64U_INIT_ZERO(1), RTFLOAT64U_INIT_ZERO(1) },
7893 { RTFLOAT64U_INIT_INF(0), RTFLOAT64U_INIT_INF(0) },
7894 { RTFLOAT64U_INIT_INF(0), RTFLOAT64U_INIT_INF(1) },
7895 { RTFLOAT64U_INIT_INF(1), RTFLOAT64U_INIT_INF(0) },
7896 { RTFLOAT64U_INIT_INF(1), RTFLOAT64U_INIT_INF(1) },
7897 /** @todo More specials. */
7898 };
7899
7900 uint32_t cMinNormalPairs = (cTests - 144) / 4;
7901 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareF2XmmR64Imm8); iFn++)
7902 {
7903 PFNIEMAIMPLMXCSRF2XMMIMM8 const pfn = g_aSseCompareF2XmmR64Imm8[iFn].pfnNative ? g_aSseCompareF2XmmR64Imm8[iFn].pfnNative : g_aSseCompareF2XmmR64Imm8[iFn].pfn;
7904
7905 IEMBINARYOUTPUT BinOut;
7906 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseCompareF2XmmR64Imm8[iFn]), RTEXITCODE_FAILURE);
7907
7908 uint32_t cNormalInputPairs = 0;
7909 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
7910 {
7911 SSE_COMPARE_F2_XMM_IMM8_TEST_T TestData; RT_ZERO(TestData);
7912
7913 TestData.InVal1.ar64[0] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7914 TestData.InVal1.ar64[1] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val1;
7915
7916 TestData.InVal2.ar64[0] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7917 TestData.InVal2.ar64[1] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].Val2;
7918
7919 if ( RTFLOAT64U_IS_NORMAL(&TestData.InVal1.ar64[0])
7920 && RTFLOAT64U_IS_NORMAL(&TestData.InVal1.ar64[1])
7921 && RTFLOAT64U_IS_NORMAL(&TestData.InVal2.ar64[0])
7922 && RTFLOAT64U_IS_NORMAL(&TestData.InVal2.ar64[1]))
7923 cNormalInputPairs++;
7924 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
7925 {
7926 iTest -= 1;
7927 continue;
7928 }
7929
7930 IEMMEDIAF2XMMSRC Src;
7931 Src.uSrc1 = TestData.InVal1;
7932 Src.uSrc2 = TestData.InVal2;
7933 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
7934 for (uint8_t bImm = 0; bImm <= SSE_COMPARE_F2_XMM_IMM8_MAX; bImm++)
7935 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
7936 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
7937 for (uint8_t iFz = 0; iFz < 2; iFz++)
7938 {
7939 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
7940 | (iRounding << X86_MXCSR_RC_SHIFT)
7941 | (iDaz ? X86_MXCSR_DAZ : 0)
7942 | (iFz ? X86_MXCSR_FZ : 0)
7943 | X86_MXCSR_XCPT_MASK;
7944 uint32_t fMxcsrM = fMxcsrIn;
7945 X86XMMREG ResM;
7946 pfn(&fMxcsrM, &ResM, &Src, bImm);
7947 TestData.fMxcsrIn = fMxcsrIn;
7948 TestData.fMxcsrOut = fMxcsrM;
7949 TestData.bImm = bImm;
7950 TestData.OutVal = ResM;
7951 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7952
7953 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
7954 uint32_t fMxcsrU = fMxcsrIn;
7955 X86XMMREG ResU;
7956 pfn(&fMxcsrU, &ResU, &Src, bImm);
7957 TestData.fMxcsrIn = fMxcsrIn;
7958 TestData.fMxcsrOut = fMxcsrU;
7959 TestData.bImm = bImm;
7960 TestData.OutVal = ResU;
7961 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7962
7963 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
7964 if (fXcpt)
7965 {
7966 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
7967 uint32_t fMxcsr1 = fMxcsrIn;
7968 X86XMMREG Res1;
7969 pfn(&fMxcsr1, &Res1, &Src, bImm);
7970 TestData.fMxcsrIn = fMxcsrIn;
7971 TestData.fMxcsrOut = fMxcsr1;
7972 TestData.bImm = bImm;
7973 TestData.OutVal = Res1;
7974 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7975
7976 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
7977 {
7978 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
7979 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
7980 uint32_t fMxcsr2 = fMxcsrIn;
7981 X86XMMREG Res2;
7982 pfn(&fMxcsr2, &Res2, &Src, bImm);
7983 TestData.fMxcsrIn = fMxcsrIn;
7984 TestData.fMxcsrOut = fMxcsr2;
7985 TestData.bImm = bImm;
7986 TestData.OutVal = Res2;
7987 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
7988 }
7989 if (!RT_IS_POWER_OF_TWO(fXcpt))
7990 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
7991 if (fUnmasked & fXcpt)
7992 {
7993 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
7994 uint32_t fMxcsr3 = fMxcsrIn;
7995 X86XMMREG Res3;
7996 pfn(&fMxcsr3, &Res3, &Src, bImm);
7997 TestData.fMxcsrIn = fMxcsrIn;
7998 TestData.fMxcsrOut = fMxcsr3;
7999 TestData.bImm = bImm;
8000 TestData.OutVal = Res3;
8001 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8002 }
8003 }
8004 }
8005 }
8006 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
8007 }
8008
8009 return RTEXITCODE_SUCCESS;
8010}
8011#endif
8012
8013static void SseCompareF2XmmR64Imm8Test(void)
8014{
8015 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseCompareF2XmmR64Imm8); iFn++)
8016 {
8017 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseCompareF2XmmR64Imm8[iFn]))
8018 continue;
8019
8020 SSE_COMPARE_F2_XMM_IMM8_TEST_T const * const paTests = g_aSseCompareF2XmmR64Imm8[iFn].paTests;
8021 uint32_t const cTests = g_aSseCompareF2XmmR64Imm8[iFn].cTests;
8022 PFNIEMAIMPLMXCSRF2XMMIMM8 pfn = g_aSseCompareF2XmmR64Imm8[iFn].pfn;
8023 uint32_t const cVars = COUNT_VARIATIONS(g_aSseCompareF2XmmR64Imm8[iFn]);
8024 if (!cTests) RTTestSkipped(g_hTest, "no tests");
8025 for (uint32_t iVar = 0; iVar < cVars; iVar++)
8026 {
8027 for (uint32_t iTest = 0; iTest < cTests; iTest++)
8028 {
8029 IEMMEDIAF2XMMSRC Src;
8030 X86XMMREG ValOut;
8031
8032 Src.uSrc1 = paTests[iTest].InVal1;
8033 Src.uSrc2 = paTests[iTest].InVal2;
8034 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
8035 pfn(&fMxcsr, &ValOut, &Src, paTests[iTest].bImm);
8036 if ( fMxcsr != paTests[iTest].fMxcsrOut
8037 || ValOut.au64[0] != paTests[iTest].OutVal.au64[0]
8038 || ValOut.au64[1] != paTests[iTest].OutVal.au64[1])
8039 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s in2=%s'%s imm8=%x\n"
8040 "%s -> mxcsr=%#08x %RX64'%RX64\n"
8041 "%s expected %#08x %RX64'%RX64%s%s (%s)\n",
8042 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
8043 FormatR64(&paTests[iTest].InVal1.ar64[0]), FormatR64(&paTests[iTest].InVal1.ar64[1]),
8044 FormatR64(&paTests[iTest].InVal2.ar64[0]), FormatR64(&paTests[iTest].InVal2.ar64[1]),
8045 paTests[iTest].bImm,
8046 iVar ? " " : "", fMxcsr, ValOut.au64[0], ValOut.au64[1],
8047 iVar ? " " : "", paTests[iTest].fMxcsrOut,
8048 paTests[iTest].OutVal.au64[0], paTests[iTest].OutVal.au64[1],
8049 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
8050 ( ValOut.au64[0] != paTests[iTest].OutVal.au64[0]
8051 || ValOut.au64[1] != paTests[iTest].OutVal.au64[1])
8052 ? " - val" : "",
8053 FormatMxcsr(paTests[iTest].fMxcsrIn));
8054 }
8055 }
8056
8057 FREE_DECOMPRESSED_TESTS(g_aSseCompareF2XmmR64Imm8[iFn]);
8058 }
8059}
8060
8061
8062/*
 8063 * SSE operations converting signed double-words to single-precision floating point values (cvtdq2ps).
8064 */
8065TYPEDEF_SUBTEST_TYPE(SSE_CONVERT_XMM_T, SSE_CONVERT_XMM_TEST_T, PFNIEMAIMPLFPSSEF2U128);
8066
8067static SSE_CONVERT_XMM_T g_aSseConvertXmmI32R32[] =
8068{
8069 ENTRY_BIN(cvtdq2ps_u128)
8070};
8071
8072#ifdef TSTIEMAIMPL_WITH_GENERATOR
8073DUMP_ALL_FN(SseConvertXmmI32R32, g_aSseConvertXmmI32R32)
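/**
 * Generates test data for cvtdq2ps (packed signed double-words to packed
 * single-precision values).
 *
 * The worker returns its output via an IEMSSERESULT, so both the result
 * register and the output MXCSR are taken from that structure when the test
 * entries are recorded.
 */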
8074static RTEXITCODE SseConvertXmmI32R32Generate(uint32_t cTests, const char * const *papszNameFmts)
8075{
8076 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
8077
8078 static int32_t const s_aSpecials[] =
8079 {
8080 INT32_MIN,
8081 INT32_MIN / 2,
8082 0,
8083 INT32_MAX / 2,
8084 INT32_MAX,
8085 (int32_t)0x80000000
8086 /** @todo More specials. */
8087 };
8088
8089 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmI32R32); iFn++)
8090 {
8091 PFNIEMAIMPLFPSSEF2U128 const pfn = g_aSseConvertXmmI32R32[iFn].pfnNative ? g_aSseConvertXmmI32R32[iFn].pfnNative : g_aSseConvertXmmI32R32[iFn].pfn;
8092
8093 IEMBINARYOUTPUT BinOut;
8094 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertXmmI32R32[iFn]), RTEXITCODE_FAILURE);
8095
8096 X86FXSTATE State;
8097 RT_ZERO(State);
8098 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
8099 {
8100 SSE_CONVERT_XMM_TEST_T TestData; RT_ZERO(TestData);
8101
8102 TestData.InVal.ai32[0] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8103 TestData.InVal.ai32[1] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8104 TestData.InVal.ai32[2] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8105 TestData.InVal.ai32[3] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8106
8107 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
8108 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
8109 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
8110 for (uint8_t iFz = 0; iFz < 2; iFz++)
8111 {
8112 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
8113 | (iRounding << X86_MXCSR_RC_SHIFT)
8114 | (iDaz ? X86_MXCSR_DAZ : 0)
8115 | (iFz ? X86_MXCSR_FZ : 0)
8116 | X86_MXCSR_XCPT_MASK;
8117 IEMSSERESULT ResM; RT_ZERO(ResM);
8118 pfn(&State, &ResM, &ResM.uResult, &TestData.InVal);
8119 TestData.fMxcsrIn = State.MXCSR;
8120 TestData.fMxcsrOut = ResM.MXCSR;
8121 TestData.OutVal = ResM.uResult;
8122 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8123
8124 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
8125 IEMSSERESULT ResU; RT_ZERO(ResU);
8126 pfn(&State, &ResU, &ResU.uResult, &TestData.InVal);
8127 TestData.fMxcsrIn = State.MXCSR;
8128 TestData.fMxcsrOut = ResU.MXCSR;
8129 TestData.OutVal = ResU.uResult;
8130 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8131
8132 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
8133 if (fXcpt)
8134 {
8135 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
8136 IEMSSERESULT Res1; RT_ZERO(Res1);
8137 pfn(&State, &Res1, &Res1.uResult, &TestData.InVal);
8138 TestData.fMxcsrIn = State.MXCSR;
8139 TestData.fMxcsrOut = Res1.MXCSR;
8140 TestData.OutVal = Res1.uResult;
8141 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8142
8143 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
8144 {
8145 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
8146 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
8147 IEMSSERESULT Res2; RT_ZERO(Res2);
8148 pfn(&State, &Res2, &Res2.uResult, &TestData.InVal);
8149 TestData.fMxcsrIn = State.MXCSR;
8150 TestData.fMxcsrOut = Res2.MXCSR;
8151 TestData.OutVal = Res2.uResult;
8152 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8153 }
8154 if (!RT_IS_POWER_OF_TWO(fXcpt))
8155 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
8156 if (fUnmasked & fXcpt)
8157 {
8158 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
8159 IEMSSERESULT Res3; RT_ZERO(Res3);
8160 pfn(&State, &Res3, &Res3.uResult, &TestData.InVal);
8161 TestData.fMxcsrIn = State.MXCSR;
8162 TestData.fMxcsrOut = Res3.MXCSR;
8163 TestData.OutVal = Res3.uResult;
8164 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8165 }
8166 }
8167 }
8168 }
8169 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
8170 }
8171
8172 return RTEXITCODE_SUCCESS;
8173}
8174#endif
8175
8176static void SseConvertXmmI32R32Test(void)
8177{
8178 X86FXSTATE State;
8179 RT_ZERO(State);
8180
8181 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmI32R32); iFn++)
8182 {
8183 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertXmmI32R32[iFn]))
8184 continue;
8185
8186 SSE_CONVERT_XMM_TEST_T const * const paTests = g_aSseConvertXmmI32R32[iFn].paTests;
8187 uint32_t const cTests = g_aSseConvertXmmI32R32[iFn].cTests;
8188 PFNIEMAIMPLFPSSEF2U128 pfn = g_aSseConvertXmmI32R32[iFn].pfn;
8189 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertXmmI32R32[iFn]);
8190 if (!cTests) RTTestSkipped(g_hTest, "no tests");
8191 for (uint32_t iVar = 0; iVar < cVars; iVar++)
8192 {
8193 for (uint32_t iTest = 0; iTest < cTests; iTest++)
8194 {
8195 IEMSSERESULT Res; RT_ZERO(Res);
8196
8197 State.MXCSR = paTests[iTest].fMxcsrIn;
8198 pfn(&State, &Res, &Res.uResult, &paTests[iTest].InVal);
8199 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
8200 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[0], &paTests[iTest].OutVal.ar32[0])
8201 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[1], &paTests[iTest].OutVal.ar32[1])
8202 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[2], &paTests[iTest].OutVal.ar32[2])
8203 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[3], &paTests[iTest].OutVal.ar32[3]))
8204 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI32'%RI32'%RI32'%RI32 \n"
8205 "%s -> mxcsr=%#08x %s'%s'%s'%s\n"
8206 "%s expected %#08x %s'%s'%s'%s%s%s (%s)\n",
8207 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
8208 paTests[iTest].InVal.ai32[0], paTests[iTest].InVal.ai32[1],
8209 paTests[iTest].InVal.ai32[2], paTests[iTest].InVal.ai32[3],
8210 iVar ? " " : "", Res.MXCSR,
8211 FormatR32(&Res.uResult.ar32[0]), FormatR32(&Res.uResult.ar32[1]),
8212 FormatR32(&Res.uResult.ar32[2]), FormatR32(&Res.uResult.ar32[3]),
8213 iVar ? " " : "", paTests[iTest].fMxcsrOut,
8214 FormatR32(&paTests[iTest].OutVal.ar32[0]), FormatR32(&paTests[iTest].OutVal.ar32[1]),
8215 FormatR32(&paTests[iTest].OutVal.ar32[2]), FormatR32(&paTests[iTest].OutVal.ar32[3]),
8216 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
8217 ( !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[0], &paTests[iTest].OutVal.ar32[0])
8218 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[1], &paTests[iTest].OutVal.ar32[1])
8219 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[2], &paTests[iTest].OutVal.ar32[2])
8220 || !RTFLOAT32U_ARE_IDENTICAL(&Res.uResult.ar32[3], &paTests[iTest].OutVal.ar32[3]))
8221 ? " - val" : "",
8222 FormatMxcsr(paTests[iTest].fMxcsrIn));
8223 }
8224 }
8225
8226 FREE_DECOMPRESSED_TESTS(g_aSseConvertXmmI32R32[iFn]);
8227 }
8228}
8229
8230
8231/*
 8232 * SSE operations converting single-precision floating point values to signed double-words (cvtps2dq, cvttps2dq).
8233 */
8234static SSE_CONVERT_XMM_T g_aSseConvertXmmR32I32[] =
8235{
8236 ENTRY_BIN(cvtps2dq_u128),
8237 ENTRY_BIN(cvttps2dq_u128)
8238};
8239
8240#ifdef TSTIEMAIMPL_WITH_GENERATOR
8241DUMP_ALL_FN(SseConvertXmmR32I32, g_aSseConvertXmmR32I32)
8242static RTEXITCODE SseConvertXmmR32I32Generate(uint32_t cTests, const char * const *papszNameFmts)
8243{
8244 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
8245
8246 static struct { RTFLOAT32U aVal1[4]; } const s_aSpecials[] =
8247 {
8248 { { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0) } },
8249 { { RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(1) } },
8250 { { RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(0) } },
8251 { { RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(1) } }
8252 /** @todo More specials. */
8253 };
8254
8255 X86FXSTATE State;
8256 RT_ZERO(State);
8257 uint32_t cMinNormalPairs = (cTests - 144) / 4;
8258 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR32I32); iFn++)
8259 {
8260 PFNIEMAIMPLFPSSEF2U128 const pfn = g_aSseConvertXmmR32I32[iFn].pfnNative ? g_aSseConvertXmmR32I32[iFn].pfnNative : g_aSseConvertXmmR32I32[iFn].pfn;
8261
8262 IEMBINARYOUTPUT BinOut;
8263 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertXmmR32I32[iFn]), RTEXITCODE_FAILURE);
8264
8265 uint32_t cNormalInputPairs = 0;
8266 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
8267 {
8268 SSE_CONVERT_XMM_TEST_T TestData; RT_ZERO(TestData);
8269
8270 TestData.InVal.ar32[0] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
8271 TestData.InVal.ar32[1] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
8272 TestData.InVal.ar32[2] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[2];
8273 TestData.InVal.ar32[3] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[3];
8274
8275 if ( RTFLOAT32U_IS_NORMAL(&TestData.InVal.ar32[0])
8276 && RTFLOAT32U_IS_NORMAL(&TestData.InVal.ar32[1])
8277 && RTFLOAT32U_IS_NORMAL(&TestData.InVal.ar32[2])
8278 && RTFLOAT32U_IS_NORMAL(&TestData.InVal.ar32[3]))
8279 cNormalInputPairs++;
8280 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
8281 {
8282 iTest -= 1;
8283 continue;
8284 }
8285
8286 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
8287 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
8288 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
8289 for (uint8_t iFz = 0; iFz < 2; iFz++)
8290 {
8291 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
8292 | (iRounding << X86_MXCSR_RC_SHIFT)
8293 | (iDaz ? X86_MXCSR_DAZ : 0)
8294 | (iFz ? X86_MXCSR_FZ : 0)
8295 | X86_MXCSR_XCPT_MASK;
8296 IEMSSERESULT ResM; RT_ZERO(ResM);
8297 pfn(&State, &ResM, &ResM.uResult, &TestData.InVal);
8298 TestData.fMxcsrIn = State.MXCSR;
8299 TestData.fMxcsrOut = ResM.MXCSR;
8300 TestData.OutVal = ResM.uResult;
8301 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8302
8303 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
8304 IEMSSERESULT ResU; RT_ZERO(ResU);
8305 pfn(&State, &ResU, &ResU.uResult, &TestData.InVal);
8306 TestData.fMxcsrIn = State.MXCSR;
8307 TestData.fMxcsrOut = ResU.MXCSR;
8308 TestData.OutVal = ResU.uResult;
8309 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8310
8311 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
8312 if (fXcpt)
8313 {
8314 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
8315 IEMSSERESULT Res1; RT_ZERO(Res1);
8316 pfn(&State, &Res1, &Res1.uResult, &TestData.InVal);
8317 TestData.fMxcsrIn = State.MXCSR;
8318 TestData.fMxcsrOut = Res1.MXCSR;
8319 TestData.OutVal = Res1.uResult;
8320 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8321
8322 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
8323 {
8324 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
8325 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
8326 IEMSSERESULT Res2; RT_ZERO(Res2);
8327 pfn(&State, &Res2, &Res2.uResult, &TestData.InVal);
8328 TestData.fMxcsrIn = State.MXCSR;
8329 TestData.fMxcsrOut = Res2.MXCSR;
8330 TestData.OutVal = Res2.uResult;
8331 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8332 }
8333 if (!RT_IS_POWER_OF_TWO(fXcpt))
8334 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
8335 if (fUnmasked & fXcpt)
8336 {
8337 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
8338 IEMSSERESULT Res3; RT_ZERO(Res3);
8339 pfn(&State, &Res3, &Res3.uResult, &TestData.InVal);
8340 TestData.fMxcsrIn = State.MXCSR;
8341 TestData.fMxcsrOut = Res3.MXCSR;
8342 TestData.OutVal = Res3.uResult;
8343 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8344 }
8345 }
8346 }
8347 }
8348 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
8349 }
8350
8351 return RTEXITCODE_SUCCESS;
8352}
8353#endif
8354
8355static void SseConvertXmmR32I32Test(void)
8356{
8357 X86FXSTATE State;
8358 RT_ZERO(State);
8359
8360 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR32I32); iFn++)
8361 {
8362 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertXmmR32I32[iFn]))
8363 continue;
8364
8365 SSE_CONVERT_XMM_TEST_T const * const paTests = g_aSseConvertXmmR32I32[iFn].paTests;
8366 uint32_t const cTests = g_aSseConvertXmmR32I32[iFn].cTests;
8367 PFNIEMAIMPLFPSSEF2U128 pfn = g_aSseConvertXmmR32I32[iFn].pfn;
8368 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertXmmR32I32[iFn]);
8369 if (!cTests) RTTestSkipped(g_hTest, "no tests");
8370 for (uint32_t iVar = 0; iVar < cVars; iVar++)
8371 {
8372 for (uint32_t iTest = 0; iTest < cTests; iTest++)
8373 {
8374 IEMSSERESULT Res; RT_ZERO(Res);
8375
8376 State.MXCSR = paTests[iTest].fMxcsrIn;
8377 pfn(&State, &Res, &Res.uResult, &paTests[iTest].InVal);
8378 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
8379 || Res.uResult.ai32[0] != paTests[iTest].OutVal.ai32[0]
8380 || Res.uResult.ai32[1] != paTests[iTest].OutVal.ai32[1]
8381 || Res.uResult.ai32[2] != paTests[iTest].OutVal.ai32[2]
8382 || Res.uResult.ai32[3] != paTests[iTest].OutVal.ai32[3])
8383 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s'%s'%s \n"
8384 "%s -> mxcsr=%#08x %RI32'%RI32'%RI32'%RI32\n"
8385 "%s expected %#08x %RI32'%RI32'%RI32'%RI32%s%s (%s)\n",
8386 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
8387 FormatR32(&paTests[iTest].InVal.ar32[0]), FormatR32(&paTests[iTest].InVal.ar32[1]),
8388 FormatR32(&paTests[iTest].InVal.ar32[2]), FormatR32(&paTests[iTest].InVal.ar32[3]),
8389 iVar ? " " : "", Res.MXCSR,
8390 Res.uResult.ai32[0], Res.uResult.ai32[1],
8391 Res.uResult.ai32[2], Res.uResult.ai32[3],
8392 iVar ? " " : "", paTests[iTest].fMxcsrOut,
8393 paTests[iTest].OutVal.ai32[0], paTests[iTest].OutVal.ai32[1],
8394 paTests[iTest].OutVal.ai32[2], paTests[iTest].OutVal.ai32[3],
8395 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
8396 ( Res.uResult.ai32[0] != paTests[iTest].OutVal.ai32[0]
8397 || Res.uResult.ai32[1] != paTests[iTest].OutVal.ai32[1]
8398 || Res.uResult.ai32[2] != paTests[iTest].OutVal.ai32[2]
8399 || Res.uResult.ai32[3] != paTests[iTest].OutVal.ai32[3])
8400 ? " - val" : "",
8401 FormatMxcsr(paTests[iTest].fMxcsrIn));
8402 }
8403 }
8404
8405 FREE_DECOMPRESSED_TESTS(g_aSseConvertXmmR32I32[iFn]);
8406 }
8407}
8408
8409
8410/*
 8411 * SSE operations converting signed double-words to double-precision floating point values (cvtdq2pd).
8412 */
8413static SSE_CONVERT_XMM_T g_aSseConvertXmmI32R64[] =
8414{
8415 ENTRY_BIN(cvtdq2pd_u128)
8416};
8417
8418#ifdef TSTIEMAIMPL_WITH_GENERATOR
8419DUMP_ALL_FN(SseConvertXmmI32R64, g_aSseConvertXmmI32R64)
8420static RTEXITCODE SseConvertXmmI32R64Generate(uint32_t cTests, const char * const *papszNameFmts)
8421{
8422 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
8423
8424 static int32_t const s_aSpecials[] =
8425 {
8426 INT32_MIN,
8427 INT32_MIN / 2,
8428 0,
8429 INT32_MAX / 2,
8430 INT32_MAX,
8431 (int32_t)0x80000000
8432 /** @todo More specials. */
8433 };
8434
8435 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmI32R64); iFn++)
8436 {
8437 PFNIEMAIMPLFPSSEF2U128 const pfn = g_aSseConvertXmmI32R64[iFn].pfnNative ? g_aSseConvertXmmI32R64[iFn].pfnNative : g_aSseConvertXmmI32R64[iFn].pfn;
8438
8439 IEMBINARYOUTPUT BinOut;
8440 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertXmmI32R64[iFn]), RTEXITCODE_FAILURE);
8441
8442 X86FXSTATE State;
8443 RT_ZERO(State);
8444 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
8445 {
8446 SSE_CONVERT_XMM_TEST_T TestData; RT_ZERO(TestData);
8447
8448 TestData.InVal.ai32[0] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8449 TestData.InVal.ai32[1] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8450 TestData.InVal.ai32[2] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8451 TestData.InVal.ai32[3] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests];
8452
8453 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
8454 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
8455 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
8456 for (uint8_t iFz = 0; iFz < 2; iFz++)
8457 {
8458 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
8459 | (iRounding << X86_MXCSR_RC_SHIFT)
8460 | (iDaz ? X86_MXCSR_DAZ : 0)
8461 | (iFz ? X86_MXCSR_FZ : 0)
8462 | X86_MXCSR_XCPT_MASK;
8463 IEMSSERESULT ResM; RT_ZERO(ResM);
8464 pfn(&State, &ResM, &ResM.uResult, &TestData.InVal);
8465 TestData.fMxcsrIn = State.MXCSR;
8466 TestData.fMxcsrOut = ResM.MXCSR;
8467 TestData.OutVal = ResM.uResult;
8468 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8469
8470 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
8471 IEMSSERESULT ResU; RT_ZERO(ResU);
8472 pfn(&State, &ResU, &ResU.uResult, &TestData.InVal);
8473 TestData.fMxcsrIn = State.MXCSR;
8474 TestData.fMxcsrOut = ResU.MXCSR;
8475 TestData.OutVal = ResU.uResult;
8476 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8477
8478 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
8479 if (fXcpt)
8480 {
8481 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
8482 IEMSSERESULT Res1; RT_ZERO(Res1);
8483 pfn(&State, &Res1, &Res1.uResult, &TestData.InVal);
8484 TestData.fMxcsrIn = State.MXCSR;
8485 TestData.fMxcsrOut = Res1.MXCSR;
8486 TestData.OutVal = Res1.uResult;
8487 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8488
8489 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
8490 {
8491 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
8492 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
8493 IEMSSERESULT Res2; RT_ZERO(Res2);
8494 pfn(&State, &Res2, &Res2.uResult, &TestData.InVal);
8495 TestData.fMxcsrIn = State.MXCSR;
8496 TestData.fMxcsrOut = Res2.MXCSR;
8497 TestData.OutVal = Res2.uResult;
8498 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8499 }
8500 if (!RT_IS_POWER_OF_TWO(fXcpt))
8501 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
8502 if (fUnmasked & fXcpt)
8503 {
8504 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
8505 IEMSSERESULT Res3; RT_ZERO(Res3);
8506 pfn(&State, &Res3, &Res3.uResult, &TestData.InVal);
8507 TestData.fMxcsrIn = State.MXCSR;
8508 TestData.fMxcsrOut = Res3.MXCSR;
8509 TestData.OutVal = Res3.uResult;
8510 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8511 }
8512 }
8513 }
8514 }
8515 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
8516 }
8517
8518 return RTEXITCODE_SUCCESS;
8519}
8520#endif
8521
8522static void SseConvertXmmI32R64Test(void)
8523{
8524 X86FXSTATE State;
8525 RT_ZERO(State);
8526
8527 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmI32R64); iFn++)
8528 {
8529 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertXmmI32R64[iFn]))
8530 continue;
8531
8532 SSE_CONVERT_XMM_TEST_T const * const paTests = g_aSseConvertXmmI32R64[iFn].paTests;
8533 uint32_t const cTests = g_aSseConvertXmmI32R64[iFn].cTests;
8534 PFNIEMAIMPLFPSSEF2U128 pfn = g_aSseConvertXmmI32R64[iFn].pfn;
8535 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertXmmI32R64[iFn]);
8536 if (!cTests) RTTestSkipped(g_hTest, "no tests");
8537 for (uint32_t iVar = 0; iVar < cVars; iVar++)
8538 {
8539 for (uint32_t iTest = 0; iTest < cTests; iTest++)
8540 {
8541 IEMSSERESULT Res; RT_ZERO(Res);
8542
8543 State.MXCSR = paTests[iTest].fMxcsrIn;
8544 pfn(&State, &Res, &Res.uResult, &paTests[iTest].InVal);
8545 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
8546 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[0], &paTests[iTest].OutVal.ar64[0])
8547 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[1], &paTests[iTest].OutVal.ar64[1]))
8548 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI32'%RI32'%RI32'%RI32 \n"
8549 "%s -> mxcsr=%#08x %s'%s\n"
8550 "%s expected %#08x %s'%s%s%s (%s)\n",
8551 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
8552 paTests[iTest].InVal.ai32[0], paTests[iTest].InVal.ai32[1],
8553 paTests[iTest].InVal.ai32[2], paTests[iTest].InVal.ai32[3],
8554 iVar ? " " : "", Res.MXCSR,
8555 FormatR64(&Res.uResult.ar64[0]), FormatR64(&Res.uResult.ar64[1]),
8556 iVar ? " " : "", paTests[iTest].fMxcsrOut,
8557 FormatR64(&paTests[iTest].OutVal.ar64[0]), FormatR64(&paTests[iTest].OutVal.ar64[1]),
8558 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
8559 ( !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[0], &paTests[iTest].OutVal.ar64[0])
8560 || !RTFLOAT64U_ARE_IDENTICAL(&Res.uResult.ar64[1], &paTests[iTest].OutVal.ar64[1]))
8561 ? " - val" : "",
8562 FormatMxcsr(paTests[iTest].fMxcsrIn));
8563 }
8564 }
8565
8566 FREE_DECOMPRESSED_TESTS(g_aSseConvertXmmI32R64[iFn]);
8567 }
8568}
8569
8570
8571/*
8572 * Convert SSE operations converting double-precision floating point values to signed double-word values (cvtpd2dq & cvttpd2dq).
8573 */
8574static SSE_CONVERT_XMM_T g_aSseConvertXmmR64I32[] =
8575{
8576 ENTRY_BIN(cvtpd2dq_u128),
8577 ENTRY_BIN(cvttpd2dq_u128)
8578};
8579
8580#ifdef TSTIEMAIMPL_WITH_GENERATOR
8581DUMP_ALL_FN(SseConvertXmmR64I32, g_aSseConvertXmmR64I32)
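/** Generates the binary test data for cvtpd2dq & cvttpd2dq, using the same MXCSR sweep as the generator above. */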
8582static RTEXITCODE SseConvertXmmR64I32Generate(uint32_t cTests, const char * const *papszNameFmts)
8583{
8584 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
8585
8586 static struct { RTFLOAT64U aVal1[2]; } const s_aSpecials[] =
8587 {
8588 { { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(0) } },
8589 { { RTFLOAT64U_INIT_ZERO(1), RTFLOAT64U_INIT_ZERO(1) } },
8590 { { RTFLOAT64U_INIT_INF(0), RTFLOAT64U_INIT_INF(0) } },
8591 { { RTFLOAT64U_INIT_INF(1), RTFLOAT64U_INIT_INF(1) } }
8592 /** @todo More specials. */
8593 };
8594
8595 X86FXSTATE State;
8596 RT_ZERO(State);
8597 uint32_t cMinNormalPairs = (cTests - 144) / 4;
8598 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR64I32); iFn++)
8599 {
8600 PFNIEMAIMPLFPSSEF2U128 const pfn = g_aSseConvertXmmR64I32[iFn].pfnNative ? g_aSseConvertXmmR64I32[iFn].pfnNative : g_aSseConvertXmmR64I32[iFn].pfn;
8601
8602 IEMBINARYOUTPUT BinOut;
8603 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertXmmR64I32[iFn]), RTEXITCODE_FAILURE);
8604
8605 uint32_t cNormalInputPairs = 0;
8606 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
8607 {
8608 SSE_CONVERT_XMM_TEST_T TestData; RT_ZERO(TestData);
8609
8610 TestData.InVal.ar64[0] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
8611 TestData.InVal.ar64[1] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
8612
8613 if ( RTFLOAT64U_IS_NORMAL(&TestData.InVal.ar64[0])
8614 && RTFLOAT64U_IS_NORMAL(&TestData.InVal.ar64[1]))
8615 cNormalInputPairs++;
8616 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
8617 {
8618 iTest -= 1;
8619 continue;
8620 }
8621
8622 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
8623 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
8624 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
8625 for (uint8_t iFz = 0; iFz < 2; iFz++)
8626 {
8627 State.MXCSR = (fMxcsr & ~X86_MXCSR_RC_MASK)
8628 | (iRounding << X86_MXCSR_RC_SHIFT)
8629 | (iDaz ? X86_MXCSR_DAZ : 0)
8630 | (iFz ? X86_MXCSR_FZ : 0)
8631 | X86_MXCSR_XCPT_MASK;
8632 IEMSSERESULT ResM; RT_ZERO(ResM);
8633 pfn(&State, &ResM, &ResM.uResult, &TestData.InVal);
8634 TestData.fMxcsrIn = State.MXCSR;
8635 TestData.fMxcsrOut = ResM.MXCSR;
8636 TestData.OutVal = ResM.uResult;
8637 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8638
8639 State.MXCSR = State.MXCSR & ~X86_MXCSR_XCPT_MASK;
8640 IEMSSERESULT ResU; RT_ZERO(ResU);
8641 pfn(&State, &ResU, &ResU.uResult, &TestData.InVal);
8642 TestData.fMxcsrIn = State.MXCSR;
8643 TestData.fMxcsrOut = ResU.MXCSR;
8644 TestData.OutVal = ResU.uResult;
8645 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8646
8647 uint16_t fXcpt = (ResM.MXCSR | ResU.MXCSR) & X86_MXCSR_XCPT_FLAGS;
8648 if (fXcpt)
8649 {
8650 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | fXcpt;
8651 IEMSSERESULT Res1; RT_ZERO(Res1);
8652 pfn(&State, &Res1, &Res1.uResult, &TestData.InVal);
8653 TestData.fMxcsrIn = State.MXCSR;
8654 TestData.fMxcsrOut = Res1.MXCSR;
8655 TestData.OutVal = Res1.uResult;
8656 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8657
8658 if (((Res1.MXCSR & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (Res1.MXCSR & X86_MXCSR_XCPT_FLAGS))
8659 {
8660 fXcpt |= Res1.MXCSR & X86_MXCSR_XCPT_FLAGS;
8661 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
8662 IEMSSERESULT Res2; RT_ZERO(Res2);
8663 pfn(&State, &Res2, &Res2.uResult, &TestData.InVal);
8664 TestData.fMxcsrIn = State.MXCSR;
8665 TestData.fMxcsrOut = Res2.MXCSR;
8666 TestData.OutVal = Res2.uResult;
8667 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8668 }
8669 if (!RT_IS_POWER_OF_TWO(fXcpt))
8670 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
8671 if (fUnmasked & fXcpt)
8672 {
8673 State.MXCSR = (State.MXCSR & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
8674 IEMSSERESULT Res3; RT_ZERO(Res3);
8675 pfn(&State, &Res3, &Res3.uResult, &TestData.InVal);
8676 TestData.fMxcsrIn = State.MXCSR;
8677 TestData.fMxcsrOut = Res3.MXCSR;
8678 TestData.OutVal = Res3.uResult;
8679 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8680 }
8681 }
8682 }
8683 }
8684 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
8685 }
8686
8687 return RTEXITCODE_SUCCESS;
8688}
8689#endif
8690
8691static void SseConvertXmmR64I32Test(void)
8692{
8693 X86FXSTATE State;
8694 RT_ZERO(State);
8695
8696 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR64I32); iFn++)
8697 {
8698 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertXmmR64I32[iFn]))
8699 continue;
8700
8701 SSE_CONVERT_XMM_TEST_T const * const paTests = g_aSseConvertXmmR64I32[iFn].paTests;
8702 uint32_t const cTests = g_aSseConvertXmmR64I32[iFn].cTests;
8703 PFNIEMAIMPLFPSSEF2U128 pfn = g_aSseConvertXmmR64I32[iFn].pfn;
8704 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertXmmR64I32[iFn]);
8705 if (!cTests) RTTestSkipped(g_hTest, "no tests");
8706 for (uint32_t iVar = 0; iVar < cVars; iVar++)
8707 {
8708 for (uint32_t iTest = 0; iTest < cTests; iTest++)
8709 {
8710 IEMSSERESULT Res; RT_ZERO(Res);
8711
8712 State.MXCSR = paTests[iTest].fMxcsrIn;
8713 pfn(&State, &Res, &Res.uResult, &paTests[iTest].InVal);
8714 if ( Res.MXCSR != paTests[iTest].fMxcsrOut
8715 || Res.uResult.ai32[0] != paTests[iTest].OutVal.ai32[0]
8716 || Res.uResult.ai32[1] != paTests[iTest].OutVal.ai32[1]
8717 || Res.uResult.ai32[2] != paTests[iTest].OutVal.ai32[2]
8718 || Res.uResult.ai32[3] != paTests[iTest].OutVal.ai32[3])
8719 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s \n"
8720 "%s -> mxcsr=%#08x %RI32'%RI32'%RI32'%RI32\n"
8721 "%s expected %#08x %RI32'%RI32'%RI32'%RI32%s%s (%s)\n",
8722 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
8723 FormatR64(&paTests[iTest].InVal.ar64[0]), FormatR64(&paTests[iTest].InVal.ar64[1]),
8724 iVar ? " " : "", Res.MXCSR,
8725 Res.uResult.ai32[0], Res.uResult.ai32[1],
8726 Res.uResult.ai32[2], Res.uResult.ai32[3],
8727 iVar ? " " : "", paTests[iTest].fMxcsrOut,
8728 paTests[iTest].OutVal.ai32[0], paTests[iTest].OutVal.ai32[1],
8729 paTests[iTest].OutVal.ai32[2], paTests[iTest].OutVal.ai32[3],
8730 MxcsrDiff(Res.MXCSR, paTests[iTest].fMxcsrOut),
8731 ( Res.uResult.ai32[0] != paTests[iTest].OutVal.ai32[0]
8732 || Res.uResult.ai32[1] != paTests[iTest].OutVal.ai32[1]
8733 || Res.uResult.ai32[2] != paTests[iTest].OutVal.ai32[2]
8734 || Res.uResult.ai32[3] != paTests[iTest].OutVal.ai32[3])
8735 ? " - val" : "",
8736 FormatMxcsr(paTests[iTest].fMxcsrIn));
8737 }
8738 }
8739
8740 FREE_DECOMPRESSED_TESTS(g_aSseConvertXmmR64I32[iFn]);
8741 }
8742}
8743
8744
8745/*
8746 * Convert SSE operations converting double-precision floating point values to signed double-word values.
8747 */
8748TYPEDEF_SUBTEST_TYPE(SSE_CONVERT_MM_XMM_T, SSE_CONVERT_MM_XMM_TEST_T, PFNIEMAIMPLMXCSRU64U128);
8749
8750static SSE_CONVERT_MM_XMM_T g_aSseConvertMmXmm[] =
8751{
8752 ENTRY_BIN(cvtpd2pi_u128),
8753 ENTRY_BIN(cvttpd2pi_u128)
8754};
8755
8756#ifdef TSTIEMAIMPL_WITH_GENERATOR
8757DUMP_ALL_FN(SseConvertMmXmm, g_aSseConvertMmXmm)
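/**
 * Generates the binary test data for cvtpd2pi & cvttpd2pi.
 *
 * Uses the same MXCSR sweep as the XMM generators above, except that these
 * workers take the MXCSR value directly by pointer instead of an X86FXSTATE.
 */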
8758static RTEXITCODE SseConvertMmXmmGenerate(uint32_t cTests, const char * const *papszNameFmts)
8759{
8760 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
8761
8762 static struct { RTFLOAT64U aVal1[2]; } const s_aSpecials[] =
8763 {
8764 { { RTFLOAT64U_INIT_ZERO(0), RTFLOAT64U_INIT_ZERO(0) } },
8765 { { RTFLOAT64U_INIT_ZERO(1), RTFLOAT64U_INIT_ZERO(1) } },
8766 { { RTFLOAT64U_INIT_INF(0), RTFLOAT64U_INIT_INF(0) } },
8767 { { RTFLOAT64U_INIT_INF(1), RTFLOAT64U_INIT_INF(1) } }
8768 /** @todo More specials. */
8769 };
8770
8771 uint32_t cMinNormalPairs = (cTests - 144) / 4;
8772 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertMmXmm); iFn++)
8773 {
8774 PFNIEMAIMPLMXCSRU64U128 const pfn = g_aSseConvertMmXmm[iFn].pfnNative ? g_aSseConvertMmXmm[iFn].pfnNative : g_aSseConvertMmXmm[iFn].pfn;
8775
8776 IEMBINARYOUTPUT BinOut;
8777 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertMmXmm[iFn]), RTEXITCODE_FAILURE);
8778
8779 uint32_t cNormalInputPairs = 0;
8780 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
8781 {
8782 SSE_CONVERT_MM_XMM_TEST_T TestData; RT_ZERO(TestData);
8783
8784 TestData.InVal.ar64[0] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
8785 TestData.InVal.ar64[1] = iTest < cTests ? RandR64Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
8786
8787 if ( RTFLOAT64U_IS_NORMAL(&TestData.InVal.ar64[0])
8788 && RTFLOAT64U_IS_NORMAL(&TestData.InVal.ar64[1]))
8789 cNormalInputPairs++;
8790 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
8791 {
8792 iTest -= 1;
8793 continue;
8794 }
8795
8796 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
8797 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
8798 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
8799 for (uint8_t iFz = 0; iFz < 2; iFz++)
8800 {
8801 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
8802 | (iRounding << X86_MXCSR_RC_SHIFT)
8803 | (iDaz ? X86_MXCSR_DAZ : 0)
8804 | (iFz ? X86_MXCSR_FZ : 0)
8805 | X86_MXCSR_XCPT_MASK;
8806 uint32_t fMxcsrM = fMxcsrIn;
8807 uint64_t u64ResM;
8808 pfn(&fMxcsrM, &u64ResM, &TestData.InVal);
8809 TestData.fMxcsrIn = fMxcsrIn;
8810 TestData.fMxcsrOut = fMxcsrM;
8811 TestData.OutVal.u = u64ResM;
8812 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8813
8814 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
8815 uint32_t fMxcsrU = fMxcsrIn;
8816 uint64_t u64ResU;
8817 pfn(&fMxcsrU, &u64ResU, &TestData.InVal);
8818 TestData.fMxcsrIn = fMxcsrIn;
8819 TestData.fMxcsrOut = fMxcsrU;
8820 TestData.OutVal.u = u64ResU;
8821 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8822
8823 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
8824 if (fXcpt)
8825 {
8826 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
8827 uint32_t fMxcsr1 = fMxcsrIn;
8828 uint64_t u64Res1;
8829 pfn(&fMxcsr1, &u64Res1, &TestData.InVal);
8830 TestData.fMxcsrIn = fMxcsrIn;
8831 TestData.fMxcsrOut = fMxcsr1;
8832 TestData.OutVal.u = u64Res1;
8833 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8834
8835 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
8836 {
8837 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
8838 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
8839 uint32_t fMxcsr2 = fMxcsrIn;
8840 uint64_t u64Res2;
8841 pfn(&fMxcsr2, &u64Res2, &TestData.InVal);
8842 TestData.fMxcsrIn = fMxcsrIn;
8843 TestData.fMxcsrOut = fMxcsr2;
8844 TestData.OutVal.u = u64Res2;
8845 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8846 }
8847 if (!RT_IS_POWER_OF_TWO(fXcpt))
8848 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
8849 if (fUnmasked & fXcpt)
8850 {
8851 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
8852 uint32_t fMxcsr3 = fMxcsrIn;
8853 uint64_t u64Res3;
8854 pfn(&fMxcsr3, &u64Res3, &TestData.InVal);
8855 TestData.fMxcsrIn = fMxcsrIn;
8856 TestData.fMxcsrOut = fMxcsr3;
8857 TestData.OutVal.u = u64Res3;
8858 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8859 }
8860 }
8861 }
8862 }
8863 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
8864 }
8865
8866 return RTEXITCODE_SUCCESS;
8867}
8868#endif
8869
8870static void SseConvertMmXmmTest(void)
8871{
8872 X86FXSTATE State;
8873 RT_ZERO(State);
8874
8875 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertMmXmm); iFn++)
8876 {
8877 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertMmXmm[iFn]))
8878 continue;
8879
8880 SSE_CONVERT_MM_XMM_TEST_T const * const paTests = g_aSseConvertMmXmm[iFn].paTests;
8881 uint32_t const cTests = g_aSseConvertMmXmm[iFn].cTests;
8882 PFNIEMAIMPLMXCSRU64U128 pfn = g_aSseConvertMmXmm[iFn].pfn;
8883 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertMmXmm[iFn]);
8884 if (!cTests) RTTestSkipped(g_hTest, "no tests");
8885 for (uint32_t iVar = 0; iVar < cVars; iVar++)
8886 {
8887 for (uint32_t iTest = 0; iTest < cTests; iTest++)
8888 {
8889 RTUINT64U ValOut;
8890 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
8891 pfn(&fMxcsr, &ValOut.u, &paTests[iTest].InVal);
8892 if ( fMxcsr != paTests[iTest].fMxcsrOut
8893 || ValOut.ai32[0] != paTests[iTest].OutVal.ai32[0]
8894 || ValOut.ai32[1] != paTests[iTest].OutVal.ai32[1])
8895 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s\n"
8896 "%s -> mxcsr=%#08x %RI32'%RI32\n"
8897 "%s expected %#08x %RI32'%RI32%s%s (%s)\n",
8898 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
8899 FormatR64(&paTests[iTest].InVal.ar64[0]), FormatR64(&paTests[iTest].InVal.ar64[1]),
8900 iVar ? " " : "", fMxcsr, ValOut.ai32[0], ValOut.ai32[1],
8901 iVar ? " " : "", paTests[iTest].fMxcsrOut,
8902 paTests[iTest].OutVal.ai32[0], paTests[iTest].OutVal.ai32[1],
8903 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
8904 ( ValOut.ai32[0] != paTests[iTest].OutVal.ai32[0]
8905 || ValOut.ai32[1] != paTests[iTest].OutVal.ai32[1])
8906 ? " - val" : "",
8907 FormatMxcsr(paTests[iTest].fMxcsrIn));
8908 }
8909 }
8910
8911 FREE_DECOMPRESSED_TESTS(g_aSseConvertMmXmm[iFn]);
8912 }
8913}
8914
8915
8916/*
8917 * Convert SSE operations converting signed double-word values to double precision floating-point values (probably only cvtpi2pd).
8918 */
8919TYPEDEF_SUBTEST_TYPE(SSE_CONVERT_XMM_R64_MM_T, SSE_CONVERT_XMM_MM_TEST_T, PFNIEMAIMPLMXCSRU128U64);
8920
8921static SSE_CONVERT_XMM_R64_MM_T g_aSseConvertXmmR64Mm[] =
8922{
8923 ENTRY_BIN(cvtpi2pd_u128)
8924};
8925
8926#ifdef TSTIEMAIMPL_WITH_GENERATOR
8927DUMP_ALL_FN(SseConvertXmmR64Mm, g_aSseConvertXmmR64Mm)
8928static RTEXITCODE SseConvertXmmR64MmGenerate(uint32_t cTests, const char * const *papszNameFmts)
8929{
8930 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
8931
8932 static struct { int32_t aVal[2]; } const s_aSpecials[] =
8933 {
8934 { { INT32_MIN, INT32_MIN } },
8935 { { INT32_MAX, INT32_MAX } }
8936 /** @todo More specials. */
8937 };
8938
8939 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR64Mm); iFn++)
8940 {
8941 PFNIEMAIMPLMXCSRU128U64 const pfn = g_aSseConvertXmmR64Mm[iFn].pfnNative ? g_aSseConvertXmmR64Mm[iFn].pfnNative : g_aSseConvertXmmR64Mm[iFn].pfn;
8942
8943 IEMBINARYOUTPUT BinOut;
8944 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertXmmR64Mm[iFn]), RTEXITCODE_FAILURE);
8945
8946 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
8947 {
8948 SSE_CONVERT_XMM_MM_TEST_T TestData; RT_ZERO(TestData);
8949
8950 TestData.InVal.ai32[0] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests].aVal[0];
8951 TestData.InVal.ai32[1] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests].aVal[1];
8952
8953 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
8954 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
8955 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
8956 for (uint8_t iFz = 0; iFz < 2; iFz++)
8957 {
8958 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
8959 | (iRounding << X86_MXCSR_RC_SHIFT)
8960 | (iDaz ? X86_MXCSR_DAZ : 0)
8961 | (iFz ? X86_MXCSR_FZ : 0)
8962 | X86_MXCSR_XCPT_MASK;
8963 uint32_t fMxcsrM = fMxcsrIn;
8964 pfn(&fMxcsrM, &TestData.OutVal, TestData.InVal.u);
8965 TestData.fMxcsrIn = fMxcsrIn;
8966 TestData.fMxcsrOut = fMxcsrM;
8967 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8968
8969 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
8970 uint32_t fMxcsrU = fMxcsrIn;
8971 pfn(&fMxcsrU, &TestData.OutVal, TestData.InVal.u);
8972 TestData.fMxcsrIn = fMxcsrIn;
8973 TestData.fMxcsrOut = fMxcsrU;
8974 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8975
8976 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
8977 if (fXcpt)
8978 {
8979 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
8980 uint32_t fMxcsr1 = fMxcsrIn;
8981 pfn(&fMxcsr1, &TestData.OutVal, TestData.InVal.u);
8982 TestData.fMxcsrIn = fMxcsrIn;
8983 TestData.fMxcsrOut = fMxcsr1;
8984 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8985
8986 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
8987 {
8988 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
8989 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
8990 uint32_t fMxcsr2 = fMxcsrIn;
8991 pfn(&fMxcsr2, &TestData.OutVal, TestData.InVal.u);
8992 TestData.fMxcsrIn = fMxcsrIn;
8993 TestData.fMxcsrOut = fMxcsr2;
8994 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
8995 }
8996 if (!RT_IS_POWER_OF_TWO(fXcpt))
8997 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
8998 if (fUnmasked & fXcpt)
8999 {
9000 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
9001 uint32_t fMxcsr3 = fMxcsrIn;
9002 pfn(&fMxcsr3, &TestData.OutVal, TestData.InVal.u);
9003 TestData.fMxcsrIn = fMxcsrIn;
9004 TestData.fMxcsrOut = fMxcsr3;
9005 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9006 }
9007 }
9008 }
9009 }
9010 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9011 }
9012
9013 return RTEXITCODE_SUCCESS;
9014}
9015#endif
9016
9017static void SseConvertXmmR64MmTest(void)
9018{
9019 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR64Mm); iFn++)
9020 {
9021 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertXmmR64Mm[iFn]))
9022 continue;
9023
9024 SSE_CONVERT_XMM_MM_TEST_T const * const paTests = g_aSseConvertXmmR64Mm[iFn].paTests;
9025 uint32_t const cTests = g_aSseConvertXmmR64Mm[iFn].cTests;
9026 PFNIEMAIMPLMXCSRU128U64 pfn = g_aSseConvertXmmR64Mm[iFn].pfn;
9027 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertXmmR64Mm[iFn]);
9028 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9029 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9030 {
9031 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9032 {
9033 X86XMMREG ValOut;
9034 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
9035 pfn(&fMxcsr, &ValOut, paTests[iTest].InVal.u);
9036 if ( fMxcsr != paTests[iTest].fMxcsrOut
9037 || !RTFLOAT64U_ARE_IDENTICAL(&ValOut.ar64[0], &paTests[iTest].OutVal.ar64[0])
9038 || !RTFLOAT64U_ARE_IDENTICAL(&ValOut.ar64[1], &paTests[iTest].OutVal.ar64[1]))
9039 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI32'%RI32\n"
9040 "%s -> mxcsr=%#08x %s'%s\n"
9041 "%s expected %#08x %s'%s%s%s (%s)\n",
9042 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
9043 paTests[iTest].InVal.ai32[0], paTests[iTest].InVal.ai32[1],
9044 iVar ? " " : "", fMxcsr,
9045 FormatR64(&ValOut.ar64[0]), FormatR64(&ValOut.ar64[1]),
9046 iVar ? " " : "", paTests[iTest].fMxcsrOut,
9047 FormatR64(&paTests[iTest].OutVal.ar64[0]), FormatR64(&paTests[iTest].OutVal.ar64[1]),
9048 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
9049 ( !RTFLOAT64U_ARE_IDENTICAL(&ValOut.ar64[0], &paTests[iTest].OutVal.ar64[0])
9050 || !RTFLOAT64U_ARE_IDENTICAL(&ValOut.ar64[1], &paTests[iTest].OutVal.ar64[1]))
9051 ? " - val" : "",
9052 FormatMxcsr(paTests[iTest].fMxcsrIn));
9053 }
9054 }
9055
9056 FREE_DECOMPRESSED_TESTS(g_aSseConvertXmmR64Mm[iFn]);
9057 }
9058}
9059
9060
9061/*
9062 * Convert SSE operations converting signed double-word values to single precision floating-point values (probably only cvtpi2ps).
9063 */
9064TYPEDEF_SUBTEST_TYPE(SSE_CONVERT_XMM_R32_MM_T, SSE_CONVERT_XMM_MM_TEST_T, PFNIEMAIMPLMXCSRU128U64);
9065
9066static SSE_CONVERT_XMM_R32_MM_T g_aSseConvertXmmR32Mm[] =
9067{
9068 ENTRY_BIN(cvtpi2ps_u128)
9069};
9070
9071#ifdef TSTIEMAIMPL_WITH_GENERATOR
9072DUMP_ALL_FN(SseConvertXmmR32Mm, g_aSseConvertXmmR32Mm)
9073static RTEXITCODE SseConvertXmmR32MmGenerate(uint32_t cTests, const char * const *papszNameFmts)
9074{
9075 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
9076
9077 static struct { int32_t aVal[2]; } const s_aSpecials[] =
9078 {
9079 { { INT32_MIN, INT32_MIN } },
9080 { { INT32_MAX, INT32_MAX } }
9081 /** @todo More specials. */
9082 };
9083
9084 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR32Mm); iFn++)
9085 {
9086 PFNIEMAIMPLMXCSRU128U64 const pfn = g_aSseConvertXmmR32Mm[iFn].pfnNative ? g_aSseConvertXmmR32Mm[iFn].pfnNative : g_aSseConvertXmmR32Mm[iFn].pfn;
9087
9088 IEMBINARYOUTPUT BinOut;
9089 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertXmmR32Mm[iFn]), RTEXITCODE_FAILURE);
9090
9091 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
9092 {
9093 SSE_CONVERT_XMM_MM_TEST_T TestData; RT_ZERO(TestData);
9094
9095 TestData.InVal.ai32[0] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests].aVal[0];
9096 TestData.InVal.ai32[1] = iTest < cTests ? RandI32Src2(iTest) : s_aSpecials[iTest - cTests].aVal[1];
9097
9098 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
9099 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
9100 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
9101 for (uint8_t iFz = 0; iFz < 2; iFz++)
9102 {
9103 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
9104 | (iRounding << X86_MXCSR_RC_SHIFT)
9105 | (iDaz ? X86_MXCSR_DAZ : 0)
9106 | (iFz ? X86_MXCSR_FZ : 0)
9107 | X86_MXCSR_XCPT_MASK;
9108 uint32_t fMxcsrM = fMxcsrIn;
9109 pfn(&fMxcsrM, &TestData.OutVal, TestData.InVal.u);
9110 TestData.fMxcsrIn = fMxcsrIn;
9111 TestData.fMxcsrOut = fMxcsrM;
9112 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9113
9114 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
9115 uint32_t fMxcsrU = fMxcsrIn;
9116 pfn(&fMxcsrU, &TestData.OutVal, TestData.InVal.u);
9117 TestData.fMxcsrIn = fMxcsrIn;
9118 TestData.fMxcsrOut = fMxcsrU;
9119 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9120
9121 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
9122 if (fXcpt)
9123 {
9124 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
9125 uint32_t fMxcsr1 = fMxcsrIn;
9126 pfn(&fMxcsr1, &TestData.OutVal, TestData.InVal.u);
9127 TestData.fMxcsrIn = fMxcsrIn;
9128 TestData.fMxcsrOut = fMxcsr1;
9129 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9130
9131 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
9132 {
9133 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
9134 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
9135 uint32_t fMxcsr2 = fMxcsrIn;
9136 pfn(&fMxcsr2, &TestData.OutVal, TestData.InVal.u);
9137 TestData.fMxcsrIn = fMxcsrIn;
9138 TestData.fMxcsrOut = fMxcsr2;
9139 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9140 }
9141 if (!RT_IS_POWER_OF_TWO(fXcpt))
9142 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
9143 if (fUnmasked & fXcpt)
9144 {
9145 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
9146 uint32_t fMxcsr3 = fMxcsrIn;
9147 pfn(&fMxcsr3, &TestData.OutVal, TestData.InVal.u);
9148 TestData.fMxcsrIn = fMxcsrIn;
9149 TestData.fMxcsrOut = fMxcsr3;
9150 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9151 }
9152 }
9153 }
9154 }
9155 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9156 }
9157
9158 return RTEXITCODE_SUCCESS;
9159}
9160#endif
9161
9162static void SseConvertXmmR32MmTest(void)
9163{
9164 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertXmmR32Mm); iFn++)
9165 {
9166 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertXmmR32Mm[iFn]))
9167 continue;
9168
9169 SSE_CONVERT_XMM_MM_TEST_T const * const paTests = g_aSseConvertXmmR32Mm[iFn].paTests;
9170 uint32_t const cTests = g_aSseConvertXmmR32Mm[iFn].cTests;
9171 PFNIEMAIMPLMXCSRU128U64 pfn = g_aSseConvertXmmR32Mm[iFn].pfn;
9172 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertXmmR32Mm[iFn]);
9173 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9174 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9175 {
9176 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9177 {
9178 X86XMMREG ValOut;
9179 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
9180 pfn(&fMxcsr, &ValOut, paTests[iTest].InVal.u);
9181 if ( fMxcsr != paTests[iTest].fMxcsrOut
9182 || !RTFLOAT32U_ARE_IDENTICAL(&ValOut.ar32[0], &paTests[iTest].OutVal.ar32[0])
9183 || !RTFLOAT32U_ARE_IDENTICAL(&ValOut.ar32[1], &paTests[iTest].OutVal.ar32[1]))
9184 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%RI32'%RI32\n"
9185 "%s -> mxcsr=%#08x %s'%s\n"
9186 "%s expected %#08x %s'%s%s%s (%s)\n",
9187 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
9188 paTests[iTest].InVal.ai32[0], paTests[iTest].InVal.ai32[1],
9189 iVar ? " " : "", fMxcsr,
9190 FormatR32(&ValOut.ar32[0]), FormatR32(&ValOut.ar32[1]),
9191 iVar ? " " : "", paTests[iTest].fMxcsrOut,
9192 FormatR32(&paTests[iTest].OutVal.ar32[0]), FormatR32(&paTests[iTest].OutVal.ar32[1]),
9193 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
9194 ( !RTFLOAT32U_ARE_IDENTICAL(&ValOut.ar32[0], &paTests[iTest].OutVal.ar32[0])
9195 || !RTFLOAT32U_ARE_IDENTICAL(&ValOut.ar32[1], &paTests[iTest].OutVal.ar32[1]))
9196 ? " - val" : "",
9197 FormatMxcsr(paTests[iTest].fMxcsrIn));
9198 }
9199 }
9200
9201 FREE_DECOMPRESSED_TESTS(g_aSseConvertXmmR32Mm[iFn]);
9202 }
9203}
9204
9205
9206/*
9207 * Convert SSE operations converting single-precision floating point values to signed double-word values.
9208 */
9209TYPEDEF_SUBTEST_TYPE(SSE_CONVERT_MM_I32_XMM_R32_T, SSE_CONVERT_MM_R32_TEST_T, PFNIEMAIMPLMXCSRU64U64);
9210
9211static SSE_CONVERT_MM_I32_XMM_R32_T g_aSseConvertMmI32XmmR32[] =
9212{
9213 ENTRY_BIN(cvtps2pi_u128),
9214 ENTRY_BIN(cvttps2pi_u128)
9215};
9216
9217#ifdef TSTIEMAIMPL_WITH_GENERATOR
9218DUMP_ALL_FN(SseConvertMmI32XmmR32, g_aSseConvertMmI32XmmR32)
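/**
 * Generates the binary test data for cvtps2pi & cvttps2pi.
 *
 * The two single-precision inputs are packed into a 64-bit value before the
 * worker is invoked; otherwise the MXCSR sweep matches the generators above.
 */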
9219static RTEXITCODE SseConvertMmI32XmmR32Generate(uint32_t cTests, const char * const *papszNameFmts)
9220{
9221 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
9222
9223 static struct { RTFLOAT32U aVal1[2]; } const s_aSpecials[] =
9224 {
9225 { { RTFLOAT32U_INIT_ZERO(0), RTFLOAT32U_INIT_ZERO(0) } },
9226 { { RTFLOAT32U_INIT_ZERO(1), RTFLOAT32U_INIT_ZERO(1) } },
9227 { { RTFLOAT32U_INIT_INF(0), RTFLOAT32U_INIT_INF(0) } },
9228 { { RTFLOAT32U_INIT_INF(1), RTFLOAT32U_INIT_INF(1) } }
9229 /** @todo More specials. */
9230 };
9231
9232 uint32_t cMinNormalPairs = (cTests - 144) / 4;
9233 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertMmI32XmmR32); iFn++)
9234 {
9235 PFNIEMAIMPLMXCSRU64U64 const pfn = g_aSseConvertMmI32XmmR32[iFn].pfnNative ? g_aSseConvertMmI32XmmR32[iFn].pfnNative : g_aSseConvertMmI32XmmR32[iFn].pfn;
9236
9237 IEMBINARYOUTPUT BinOut;
9238 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSseConvertMmI32XmmR32[iFn]), RTEXITCODE_FAILURE);
9239
9240 uint32_t cNormalInputPairs = 0;
9241 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
9242 {
9243 SSE_CONVERT_MM_R32_TEST_T TestData; RT_ZERO(TestData);
9244
9245 TestData.ar32InVal[0] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[0];
9246 TestData.ar32InVal[1] = iTest < cTests ? RandR32Src(iTest) : s_aSpecials[iTest - cTests].aVal1[1];
9247
9248 if ( RTFLOAT32U_IS_NORMAL(&TestData.ar32InVal[0])
9249 && RTFLOAT32U_IS_NORMAL(&TestData.ar32InVal[1]))
9250 cNormalInputPairs++;
9251 else if (cNormalInputPairs < cMinNormalPairs && iTest + cMinNormalPairs >= cTests && iTest < cTests)
9252 {
9253 iTest -= 1;
9254 continue;
9255 }
9256
9257 RTFLOAT64U TestVal;
9258 TestVal.au32[0] = TestData.ar32InVal[0].u;
9259 TestVal.au32[1] = TestData.ar32InVal[1].u;
9260
9261 uint32_t const fMxcsr = RandMxcsr() & X86_MXCSR_XCPT_FLAGS;
9262 for (uint16_t iRounding = 0; iRounding < 4; iRounding++)
9263 for (uint8_t iDaz = 0; iDaz < 2; iDaz++)
9264 for (uint8_t iFz = 0; iFz < 2; iFz++)
9265 {
9266 uint32_t fMxcsrIn = (fMxcsr & ~X86_MXCSR_RC_MASK)
9267 | (iRounding << X86_MXCSR_RC_SHIFT)
9268 | (iDaz ? X86_MXCSR_DAZ : 0)
9269 | (iFz ? X86_MXCSR_FZ : 0)
9270 | X86_MXCSR_XCPT_MASK;
9271 uint32_t fMxcsrM = fMxcsrIn;
9272 uint64_t u64ResM;
9273 pfn(&fMxcsrM, &u64ResM, TestVal.u);
9274 TestData.fMxcsrIn = fMxcsrIn;
9275 TestData.fMxcsrOut = fMxcsrM;
9276 TestData.OutVal.u = u64ResM;
9277 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9278
9279 fMxcsrIn &= ~X86_MXCSR_XCPT_MASK;
9280 uint32_t fMxcsrU = fMxcsrIn;
9281 uint64_t u64ResU;
9282 pfn(&fMxcsrU, &u64ResU, TestVal.u);
9283 TestData.fMxcsrIn = fMxcsrIn;
9284 TestData.fMxcsrOut = fMxcsrU;
9285 TestData.OutVal.u = u64ResU;
9286 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9287
9288 uint16_t fXcpt = (fMxcsrM | fMxcsrU) & X86_MXCSR_XCPT_FLAGS;
9289 if (fXcpt)
9290 {
9291 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | fXcpt;
9292 uint32_t fMxcsr1 = fMxcsrIn;
9293 uint64_t u64Res1;
9294 pfn(&fMxcsr1, &u64Res1, TestVal.u);
9295 TestData.fMxcsrIn = fMxcsrIn;
9296 TestData.fMxcsrOut = fMxcsr1;
9297 TestData.OutVal.u = u64Res1;
9298 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9299
9300 if (((fMxcsr1 & X86_MXCSR_XCPT_FLAGS) & fXcpt) != (fMxcsr1 & X86_MXCSR_XCPT_FLAGS))
9301 {
9302 fXcpt |= fMxcsr1 & X86_MXCSR_XCPT_FLAGS;
9303 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | (fXcpt << X86_MXCSR_XCPT_MASK_SHIFT);
9304 uint32_t fMxcsr2 = fMxcsrIn;
9305 uint64_t u64Res2;
9306 pfn(&fMxcsr2, &u64Res2, TestVal.u);
9307 TestData.fMxcsrIn = fMxcsrIn;
9308 TestData.fMxcsrOut = fMxcsr2;
9309 TestData.OutVal.u = u64Res2;
9310 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9311 }
9312 if (!RT_IS_POWER_OF_TWO(fXcpt))
9313 for (uint16_t fUnmasked = 1; fUnmasked <= X86_MXCSR_PE; fUnmasked <<= 1)
9314 if (fUnmasked & fXcpt)
9315 {
9316 fMxcsrIn = (fMxcsrIn & ~X86_MXCSR_XCPT_MASK) | ((fXcpt & ~fUnmasked) << X86_MXCSR_XCPT_MASK_SHIFT);
9317 uint32_t fMxcsr3 = fMxcsrIn;
9318 uint64_t u64Res3;
9319 pfn(&fMxcsr3, &u64Res3, TestVal.u);
9320 TestData.fMxcsrIn = fMxcsrIn;
9321 TestData.fMxcsrOut = fMxcsr3;
9322 TestData.OutVal.u = u64Res3;
9323 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9324 }
9325 }
9326 }
9327 }
9328 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9329 }
9330
9331 return RTEXITCODE_SUCCESS;
9332}
9333#endif
9334
9335static void SseConvertMmI32XmmR32Test(void)
9336{
9337 X86FXSTATE State;
9338 RT_ZERO(State);
9339
9340 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSseConvertMmI32XmmR32); iFn++)
9341 {
9342 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSseConvertMmI32XmmR32[iFn]))
9343 continue;
9344
9345 SSE_CONVERT_MM_R32_TEST_T const * const paTests = g_aSseConvertMmI32XmmR32[iFn].paTests;
9346 uint32_t const cTests = g_aSseConvertMmI32XmmR32[iFn].cTests;
9347 PFNIEMAIMPLMXCSRU64U64 pfn = g_aSseConvertMmI32XmmR32[iFn].pfn;
9348 uint32_t const cVars = COUNT_VARIATIONS(g_aSseConvertMmI32XmmR32[iFn]);
9349 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9350 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9351 {
9352 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9353 {
9354 RTUINT64U ValOut;
9355 RTUINT64U ValIn;
9356
9357 ValIn.au32[0] = paTests[iTest].ar32InVal[0].u;
9358 ValIn.au32[1] = paTests[iTest].ar32InVal[1].u;
9359
9360 uint32_t fMxcsr = paTests[iTest].fMxcsrIn;
9361 pfn(&fMxcsr, &ValOut.u, ValIn.u);
9362 if ( fMxcsr != paTests[iTest].fMxcsrOut
9363 || ValOut.ai32[0] != paTests[iTest].OutVal.ai32[0]
9364 || ValOut.ai32[1] != paTests[iTest].OutVal.ai32[1])
9365 RTTestFailed(g_hTest, "#%04u%s: mxcsr=%#08x in1=%s'%s \n"
9366 "%s -> mxcsr=%#08x %RI32'%RI32\n"
9367 "%s expected %#08x %RI32'%RI32%s%s (%s)\n",
9368 iTest, iVar ? "/n" : "", paTests[iTest].fMxcsrIn,
9369 FormatR32(&paTests[iTest].ar32InVal[0]), FormatR32(&paTests[iTest].ar32InVal[1]),
9370 iVar ? " " : "", fMxcsr,
9371 ValOut.ai32[0], ValOut.ai32[1],
9372 iVar ? " " : "", paTests[iTest].fMxcsrOut,
9373 paTests[iTest].OutVal.ai32[0], paTests[iTest].OutVal.ai32[1],
9374 MxcsrDiff(fMxcsr, paTests[iTest].fMxcsrOut),
9375 ( ValOut.ai32[0] != paTests[iTest].OutVal.ai32[0]
9376 || ValOut.ai32[1] != paTests[iTest].OutVal.ai32[1])
9377 ? " - val" : "",
9378 FormatMxcsr(paTests[iTest].fMxcsrIn));
9379 }
9380 }
9381
9382 FREE_DECOMPRESSED_TESTS(g_aSseConvertMmI32XmmR32[iFn]);
9383 }
9384}
9385
9386
9387/*
9388 * SSE 4.2 pcmpxstrx instructions.
9389 */
9390TYPEDEF_SUBTEST_TYPE(SSE_PCMPISTRI_T, SSE_PCMPISTRI_TEST_T, PFNIEMAIMPLPCMPISTRIU128IMM8);
9391
9392static SSE_PCMPISTRI_T g_aSsePcmpistri[] =
9393{
9394 ENTRY_BIN_SSE_OPT(pcmpistri_u128),
9395};
9396
9397#ifdef TSTIEMAIMPL_WITH_GENERATOR
9398DUMP_ALL_FN(SseComparePcmpistri, g_aSsePcmpistri)
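/**
 * Generates the binary test data for pcmpistri.
 *
 * Each pair of (mostly random) 128-bit sources is run through all 256
 * immediate byte values, and the sweep is then repeated with both sources
 * set to the same value.  The EFLAGS input/output and the resulting ECX
 * index are recorded for every combination.
 */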
9399static RTEXITCODE SseComparePcmpistriGenerate(uint32_t cTests, const char * const *papszNameFmts)
9400{
9401 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
9402
9403 static struct { RTUINT128U uSrc1; RTUINT128U uSrc2; } const s_aSpecials[] =
9404 {
9405 { RTUINT128_INIT_C(0, 0), RTUINT128_INIT_C(0, 0) },
9406 /** @todo More specials. */
9407 };
9408
9409 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpistri); iFn++)
9410 {
9411 PFNIEMAIMPLPCMPISTRIU128IMM8 const pfn = g_aSsePcmpistri[iFn].pfnNative ? g_aSsePcmpistri[iFn].pfnNative : g_aSsePcmpistri[iFn].pfn;
9412
9413 IEMBINARYOUTPUT BinOut;
9414 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSsePcmpistri[iFn]), RTEXITCODE_FAILURE);
9415
9416 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
9417 {
9418 SSE_PCMPISTRI_TEST_T TestData; RT_ZERO(TestData);
9419
9420 TestData.InVal1.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc1;
9421 TestData.InVal2.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc2;
9422
9423 IEMPCMPISTRXSRC TestVal;
9424 TestVal.uSrc1 = TestData.InVal1.uXmm;
9425 TestVal.uSrc2 = TestData.InVal2.uXmm;
9426
9427 uint32_t const fEFlagsIn = RandEFlags();
9428 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9429 {
9430 uint32_t fEFlagsOut = fEFlagsIn;
9431 pfn(&TestData.u32EcxOut, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9432 TestData.fEFlagsIn = fEFlagsIn;
9433 TestData.fEFlagsOut = fEFlagsOut;
9434 TestData.bImm = (uint8_t)u16Imm;
9435 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9436 }
9437
9438 /* Repeat the tests with both source values being identical. */
9439 TestData.InVal2.uXmm = TestData.InVal1.uXmm;
9440 TestVal.uSrc1 = TestData.InVal1.uXmm;
9441 TestVal.uSrc2 = TestData.InVal2.uXmm;
9442
9443 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9444 {
9445 uint32_t fEFlagsOut = fEFlagsIn;
9446 pfn(&TestData.u32EcxOut, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9447 TestData.fEFlagsIn = fEFlagsIn;
9448 TestData.fEFlagsOut = fEFlagsOut;
9449 TestData.bImm = (uint8_t)u16Imm;
9450 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9451 }
9452 }
9453 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9454 }
9455
9456 return RTEXITCODE_SUCCESS;
9457}
9458#endif
9459
9460static void SseComparePcmpistriTest(void)
9461{
9462 X86FXSTATE State;
9463 RT_ZERO(State);
9464
9465 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpistri); iFn++)
9466 {
9467 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSsePcmpistri[iFn]))
9468 continue;
9469
9470 SSE_PCMPISTRI_TEST_T const * const paTests = g_aSsePcmpistri[iFn].paTests;
9471 uint32_t const cTests = g_aSsePcmpistri[iFn].cTests;
9472 PFNIEMAIMPLPCMPISTRIU128IMM8 pfn = g_aSsePcmpistri[iFn].pfn;
9473 uint32_t const cVars = COUNT_VARIATIONS(g_aSsePcmpistri[iFn]);
9474 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9475 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9476 {
9477 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9478 {
9479 IEMPCMPISTRXSRC TestVal;
9480 TestVal.uSrc1 = paTests[iTest].InVal1.uXmm;
9481 TestVal.uSrc2 = paTests[iTest].InVal2.uXmm;
9482
9483 uint32_t fEFlags = paTests[iTest].fEFlagsIn;
9484 uint32_t u32EcxOut = 0;
9485 pfn(&u32EcxOut, &fEFlags, &TestVal, paTests[iTest].bImm);
9486 if ( fEFlags != paTests[iTest].fEFlagsOut
9487 || u32EcxOut != paTests[iTest].u32EcxOut)
9488 RTTestFailed(g_hTest, "#%04u%s: efl=%#08x in1=%s in2=%s bImm=%#x\n"
9489 "%s -> efl=%#08x %RU32\n"
9490 "%s expected %#08x %RU32%s%s\n",
9491 iTest, iVar ? "/n" : "", paTests[iTest].fEFlagsIn,
9492 FormatU128(&paTests[iTest].InVal1.uXmm), FormatU128(&paTests[iTest].InVal2.uXmm), paTests[iTest].bImm,
9493 iVar ? " " : "", fEFlags, u32EcxOut,
9494 iVar ? " " : "", paTests[iTest].fEFlagsOut, paTests[iTest].u32EcxOut,
9495 EFlagsDiff(fEFlags, paTests[iTest].fEFlagsOut),
9496 (u32EcxOut != paTests[iTest].u32EcxOut) ? " - val" : "");
9497 }
9498 }
9499
9500 FREE_DECOMPRESSED_TESTS(g_aSsePcmpistri[iFn]);
9501 }
9502}
9503
9504
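/*
 * SSE 4.2 pcmpistrm - like pcmpistri above, but producing a 128-bit result value instead of an index in ECX.
 */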
9505TYPEDEF_SUBTEST_TYPE(SSE_PCMPISTRM_T, SSE_PCMPISTRM_TEST_T, PFNIEMAIMPLPCMPISTRMU128IMM8);
9506
9507static SSE_PCMPISTRM_T g_aSsePcmpistrm[] =
9508{
9509 ENTRY_BIN_SSE_OPT(pcmpistrm_u128),
9510};
9511
9512#ifdef TSTIEMAIMPL_WITH_GENERATOR
9513DUMP_ALL_FN(SseComparePcmpistrm, g_aSsePcmpistrm)
9514static RTEXITCODE SseComparePcmpistrmGenerate(uint32_t cTests, const char * const *papszNameFmts)
9515{
9516 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
9517
9518 static struct { RTUINT128U uSrc1; RTUINT128U uSrc2; } const s_aSpecials[] =
9519 {
9520 { RTUINT128_INIT_C(0, 0), RTUINT128_INIT_C(0, 0) },
9521 /** @todo More specials. */
9522 };
9523
9524 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpistrm); iFn++)
9525 {
9526 PFNIEMAIMPLPCMPISTRMU128IMM8 const pfn = g_aSsePcmpistrm[iFn].pfnNative ? g_aSsePcmpistrm[iFn].pfnNative : g_aSsePcmpistrm[iFn].pfn;
9527
9528 IEMBINARYOUTPUT BinOut;
9529 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSsePcmpistrm[iFn]), RTEXITCODE_FAILURE);
9530
9531 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
9532 {
9533 SSE_PCMPISTRM_TEST_T TestData; RT_ZERO(TestData);
9534
9535 TestData.InVal1.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc1;
9536 TestData.InVal2.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc2;
9537
9538 IEMPCMPISTRXSRC TestVal;
9539 TestVal.uSrc1 = TestData.InVal1.uXmm;
9540 TestVal.uSrc2 = TestData.InVal2.uXmm;
9541
9542 uint32_t const fEFlagsIn = RandEFlags();
9543 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9544 {
9545 uint32_t fEFlagsOut = fEFlagsIn;
9546 pfn(&TestData.OutVal.uXmm, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9547 TestData.fEFlagsIn = fEFlagsIn;
9548 TestData.fEFlagsOut = fEFlagsOut;
9549 TestData.bImm = (uint8_t)u16Imm;
9550 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9551 }
9552
9553 /* Repeat the tests with both source values being identical. */
9554 TestData.InVal2.uXmm = TestData.InVal1.uXmm;
9555 TestVal.uSrc1 = TestData.InVal1.uXmm;
9556 TestVal.uSrc2 = TestData.InVal2.uXmm;
9557
9558 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9559 {
9560 uint32_t fEFlagsOut = fEFlagsIn;
9561 pfn(&TestData.OutVal.uXmm, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9562 TestData.fEFlagsIn = fEFlagsIn;
9563 TestData.fEFlagsOut = fEFlagsOut;
9564 TestData.bImm = (uint8_t)u16Imm;
9565 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9566 }
9567 }
9568 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9569 }
9570
9571 return RTEXITCODE_SUCCESS;
9572}
9573#endif
9574
9575static void SseComparePcmpistrmTest(void)
9576{
9577 X86FXSTATE State;
9578 RT_ZERO(State);
9579
9580 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpistrm); iFn++)
9581 {
9582 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSsePcmpistrm[iFn]))
9583 continue;
9584
9585 SSE_PCMPISTRM_TEST_T const * const paTests = g_aSsePcmpistrm[iFn].paTests;
9586 uint32_t const cTests = g_aSsePcmpistrm[iFn].cTests;
9587 PFNIEMAIMPLPCMPISTRMU128IMM8 pfn = g_aSsePcmpistrm[iFn].pfn;
9588 uint32_t const cVars = COUNT_VARIATIONS(g_aSsePcmpistrm[iFn]);
9589 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9590 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9591 {
9592 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9593 {
9594 IEMPCMPISTRXSRC TestVal;
9595 TestVal.uSrc1 = paTests[iTest].InVal1.uXmm;
9596 TestVal.uSrc2 = paTests[iTest].InVal2.uXmm;
9597
9598 uint32_t fEFlags = paTests[iTest].fEFlagsIn;
9599 RTUINT128U OutVal;
9600 pfn(&OutVal, &fEFlags, &TestVal, paTests[iTest].bImm);
9601 if ( fEFlags != paTests[iTest].fEFlagsOut
9602 || OutVal.s.Hi != paTests[iTest].OutVal.uXmm.s.Hi
9603 || OutVal.s.Lo != paTests[iTest].OutVal.uXmm.s.Lo)
9604 RTTestFailed(g_hTest, "#%04u%s: efl=%#08x in1=%s in2=%s bImm=%#x\n"
9605 "%s -> efl=%#08x %s\n"
9606 "%s expected %#08x %s%s%s\n",
9607 iTest, iVar ? "/n" : "", paTests[iTest].fEFlagsIn,
9608 FormatU128(&paTests[iTest].InVal1.uXmm), FormatU128(&paTests[iTest].InVal2.uXmm), paTests[iTest].bImm,
9609 iVar ? " " : "", fEFlags, FormatU128(&OutVal),
9610 iVar ? " " : "", paTests[iTest].fEFlagsOut, FormatU128(&paTests[iTest].OutVal.uXmm),
9611 EFlagsDiff(fEFlags, paTests[iTest].fEFlagsOut),
9612 ( OutVal.s.Hi != paTests[iTest].OutVal.uXmm.s.Hi
9613 || OutVal.s.Lo != paTests[iTest].OutVal.uXmm.s.Lo) ? " - val" : "");
9614 }
9615 }
9616
9617 FREE_DECOMPRESSED_TESTS(g_aSsePcmpistrm[iFn]);
9618 }
9619}
9620
9621
9622TYPEDEF_SUBTEST_TYPE(SSE_PCMPESTRI_T, SSE_PCMPESTRI_TEST_T, PFNIEMAIMPLPCMPESTRIU128IMM8);
9623
9624static SSE_PCMPESTRI_T g_aSsePcmpestri[] =
9625{
9626 ENTRY_BIN_SSE_OPT(pcmpestri_u128),
9627};
9628
9629#ifdef TSTIEMAIMPL_WITH_GENERATOR
9630DUMP_ALL_FN(SseComparePcmpestri, g_aSsePcmpestri)
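/**
 * Generates the binary test data for pcmpestri.
 *
 * Like the implicit-length pcmpistri generator above, but additionally
 * sweeps a small set of explicit RAX/RDX length values (-20 and 0) so that
 * negative and zero lengths are covered too.
 */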
9631static RTEXITCODE SseComparePcmpestriGenerate(uint32_t cTests, const char * const *papszNameFmts)
9632{
9633 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
9634
9635 static struct { RTUINT128U uSrc1; RTUINT128U uSrc2; } const s_aSpecials[] =
9636 {
9637 { RTUINT128_INIT_C(0, 0), RTUINT128_INIT_C(0, 0) },
9638 /** @todo More specials. */
9639 };
9640
9641 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpestri); iFn++)
9642 {
9643 PFNIEMAIMPLPCMPESTRIU128IMM8 const pfn = g_aSsePcmpestri[iFn].pfnNative ? g_aSsePcmpestri[iFn].pfnNative : g_aSsePcmpestri[iFn].pfn;
9644
9645 IEMBINARYOUTPUT BinOut;
9646 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSsePcmpestri[iFn]), RTEXITCODE_FAILURE);
9647
9648 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
9649 {
9650 SSE_PCMPESTRI_TEST_T TestData; RT_ZERO(TestData);
9651
9652 TestData.InVal1.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc1;
9653 TestData.InVal2.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc2;
9654
9655 for (int64_t i64Rax = -20; i64Rax < 20; i64Rax += 20)
9656 for (int64_t i64Rdx = -20; i64Rdx < 20; i64Rdx += 20)
9657 {
9658 TestData.u64Rax = (uint64_t)i64Rax;
9659 TestData.u64Rdx = (uint64_t)i64Rdx;
9660
9661 IEMPCMPESTRXSRC TestVal;
9662 TestVal.uSrc1 = TestData.InVal1.uXmm;
9663 TestVal.uSrc2 = TestData.InVal2.uXmm;
9664 TestVal.u64Rax = TestData.u64Rax;
9665 TestVal.u64Rdx = TestData.u64Rdx;
9666
9667 uint32_t const fEFlagsIn = RandEFlags();
9668 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9669 {
9670 uint32_t fEFlagsOut = fEFlagsIn;
9671 pfn(&TestData.u32EcxOut, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9672 TestData.fEFlagsIn = fEFlagsIn;
9673 TestData.fEFlagsOut = fEFlagsOut;
9674 TestData.bImm = (uint8_t)u16Imm;
9675 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9676 }
9677
9678 /* Repeat the tests with both source values being identical. */
9679 TestData.InVal2.uXmm = TestData.InVal1.uXmm;
9680 TestVal.uSrc1 = TestData.InVal1.uXmm;
9681 TestVal.uSrc2 = TestData.InVal2.uXmm;
9682
9683 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9684 {
9685 uint32_t fEFlagsOut = fEFlagsIn;
9686 pfn(&TestData.u32EcxOut, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9687 TestData.fEFlagsIn = fEFlagsIn;
9688 TestData.fEFlagsOut = fEFlagsOut;
9689 TestData.bImm = (uint8_t)u16Imm;
9690 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9691 }
9692 }
9693 }
9694 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9695 }
9696
9697 return RTEXITCODE_SUCCESS;
9698}
9699#endif
9700
9701static void SseComparePcmpestriTest(void)
9702{
9703 X86FXSTATE State;
9704 RT_ZERO(State);
9705
9706 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpestri); iFn++)
9707 {
9708 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSsePcmpestri[iFn]))
9709 continue;
9710
9711 SSE_PCMPESTRI_TEST_T const * const paTests = g_aSsePcmpestri[iFn].paTests;
9712 uint32_t const cTests = g_aSsePcmpestri[iFn].cTests;
9713 PFNIEMAIMPLPCMPESTRIU128IMM8 pfn = g_aSsePcmpestri[iFn].pfn;
9714 uint32_t const cVars = COUNT_VARIATIONS(g_aSsePcmpestri[iFn]);
9715 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9716 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9717 {
9718 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9719 {
9720 IEMPCMPESTRXSRC TestVal;
9721 TestVal.uSrc1 = paTests[iTest].InVal1.uXmm;
9722 TestVal.uSrc2 = paTests[iTest].InVal2.uXmm;
9723 TestVal.u64Rax = paTests[iTest].u64Rax;
9724 TestVal.u64Rdx = paTests[iTest].u64Rdx;
9725
9726 uint32_t fEFlags = paTests[iTest].fEFlagsIn;
9727 uint32_t u32EcxOut = 0;
9728 pfn(&u32EcxOut, &fEFlags, &TestVal, paTests[iTest].bImm);
9729 if ( fEFlags != paTests[iTest].fEFlagsOut
9730 || u32EcxOut != paTests[iTest].u32EcxOut)
9731 RTTestFailed(g_hTest, "#%04u%s: efl=%#08x in1=%s rax1=%RI64 in2=%s rdx2=%RI64 bImm=%#x\n"
9732 "%s -> efl=%#08x %RU32\n"
9733 "%s expected %#08x %RU32%s%s\n",
9734 iTest, iVar ? "/n" : "", paTests[iTest].fEFlagsIn,
9735 FormatU128(&paTests[iTest].InVal1.uXmm), paTests[iTest].u64Rax,
9736 FormatU128(&paTests[iTest].InVal2.uXmm), paTests[iTest].u64Rdx,
9737 paTests[iTest].bImm,
9738 iVar ? " " : "", fEFlags, u32EcxOut,
9739 iVar ? " " : "", paTests[iTest].fEFlagsOut, paTests[iTest].u32EcxOut,
9740 EFlagsDiff(fEFlags, paTests[iTest].fEFlagsOut),
9741 (u32EcxOut != paTests[iTest].u32EcxOut) ? " - val" : "");
9742 }
9743 }
9744
9745 FREE_DECOMPRESSED_TESTS(g_aSsePcmpestri[iFn]);
9746 }
9747}
9748
9749
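/*
 * SSE 4.2 pcmpestrm - the explicit-length variant producing a 128-bit result, generated the same way as pcmpestri above.
 */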
9750TYPEDEF_SUBTEST_TYPE(SSE_PCMPESTRM_T, SSE_PCMPESTRM_TEST_T, PFNIEMAIMPLPCMPESTRMU128IMM8);
9751
9752static SSE_PCMPESTRM_T g_aSsePcmpestrm[] =
9753{
9754 ENTRY_BIN_SSE_OPT(pcmpestrm_u128),
9755};
9756
9757#ifdef TSTIEMAIMPL_WITH_GENERATOR
9758DUMP_ALL_FN(SseComparePcmpestrm, g_aSsePcmpestrm)
9759static RTEXITCODE SseComparePcmpestrmGenerate(uint32_t cTests, const char * const *papszNameFmts)
9760{
9761 cTests = RT_MAX(192, cTests); /* there are 144 standard input variations */
9762
9763 static struct { RTUINT128U uSrc1; RTUINT128U uSrc2; } const s_aSpecials[] =
9764 {
9765 { RTUINT128_INIT_C(0, 0), RTUINT128_INIT_C(0, 0) },
9766 /** @todo More specials. */
9767 };
9768
9769 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpestrm); iFn++)
9770 {
9771 PFNIEMAIMPLPCMPESTRMU128IMM8 const pfn = g_aSsePcmpestrm[iFn].pfnNative ? g_aSsePcmpestrm[iFn].pfnNative : g_aSsePcmpestrm[iFn].pfn;
9772
9773 IEMBINARYOUTPUT BinOut;
9774 AssertReturn(GENERATE_BINARY_OPEN(&BinOut, papszNameFmts, g_aSsePcmpestrm[iFn]), RTEXITCODE_FAILURE);
9775
9776 for (uint32_t iTest = 0; iTest < cTests + RT_ELEMENTS(s_aSpecials); iTest += 1)
9777 {
9778 SSE_PCMPESTRM_TEST_T TestData; RT_ZERO(TestData);
9779
9780 TestData.InVal1.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc1;
9781 TestData.InVal2.uXmm = iTest < cTests ? RandU128() : s_aSpecials[iTest - cTests].uSrc2;
9782
9783 for (int64_t i64Rax = -20; i64Rax < 20; i64Rax += 20)
9784 for (int64_t i64Rdx = -20; i64Rdx < 20; i64Rdx += 20)
9785 {
9786 TestData.u64Rax = (uint64_t)i64Rax;
9787 TestData.u64Rdx = (uint64_t)i64Rdx;
9788
9789 IEMPCMPESTRXSRC TestVal;
9790 TestVal.uSrc1 = TestData.InVal1.uXmm;
9791 TestVal.uSrc2 = TestData.InVal2.uXmm;
9792 TestVal.u64Rax = TestData.u64Rax;
9793 TestVal.u64Rdx = TestData.u64Rdx;
9794
9795 uint32_t const fEFlagsIn = RandEFlags();
9796 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9797 {
9798 uint32_t fEFlagsOut = fEFlagsIn;
9799 pfn(&TestData.OutVal.uXmm, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9800 TestData.fEFlagsIn = fEFlagsIn;
9801 TestData.fEFlagsOut = fEFlagsOut;
9802 TestData.bImm = (uint8_t)u16Imm;
9803 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9804 }
9805
9806 /* Repeat the tests with both source values being identical. */
9807 TestData.InVal2.uXmm = TestData.InVal1.uXmm;
9808 TestVal.uSrc1 = TestData.InVal1.uXmm;
9809 TestVal.uSrc2 = TestData.InVal2.uXmm;
9810
9811 for (uint16_t u16Imm = 0; u16Imm < 256; u16Imm++)
9812 {
9813 uint32_t fEFlagsOut = fEFlagsIn;
9814 pfn(&TestData.OutVal.uXmm, &fEFlagsOut, &TestVal, (uint8_t)u16Imm);
9815 TestData.fEFlagsIn = fEFlagsIn;
9816 TestData.fEFlagsOut = fEFlagsOut;
9817 TestData.bImm = (uint8_t)u16Imm;
9818 GenerateBinaryWrite(&BinOut, &TestData, sizeof(TestData));
9819 }
9820 }
9821 }
9822 AssertReturn(GenerateBinaryClose(&BinOut), RTEXITCODE_FAILURE);
9823 }
9824
9825 return RTEXITCODE_SUCCESS;
9826}
9827#endif
9828
9829static void SseComparePcmpestrmTest(void)
9830{
9831 X86FXSTATE State;
9832 RT_ZERO(State);
9833
9834 for (size_t iFn = 0; iFn < RT_ELEMENTS(g_aSsePcmpestrm); iFn++)
9835 {
9836 if (!SUBTEST_CHECK_IF_ENABLED_AND_DECOMPRESS(g_aSsePcmpestrm[iFn]))
9837 continue;
9838
9839 SSE_PCMPESTRM_TEST_T const * const paTests = g_aSsePcmpestrm[iFn].paTests;
9840 uint32_t const cTests = g_aSsePcmpestrm[iFn].cTests;
9841 PFNIEMAIMPLPCMPESTRMU128IMM8 pfn = g_aSsePcmpestrm[iFn].pfn;
9842 uint32_t const cVars = COUNT_VARIATIONS(g_aSsePcmpestrm[iFn]);
9843 if (!cTests) RTTestSkipped(g_hTest, "no tests");
9844 for (uint32_t iVar = 0; iVar < cVars; iVar++)
9845 {
9846 for (uint32_t iTest = 0; iTest < cTests; iTest++)
9847 {
9848 IEMPCMPESTRXSRC TestVal;
9849 TestVal.uSrc1 = paTests[iTest].InVal1.uXmm;
9850 TestVal.uSrc2 = paTests[iTest].InVal2.uXmm;
9851 TestVal.u64Rax = paTests[iTest].u64Rax;
9852 TestVal.u64Rdx = paTests[iTest].u64Rdx;
9853
9854 uint32_t fEFlags = paTests[iTest].fEFlagsIn;
9855 RTUINT128U OutVal;
9856 pfn(&OutVal, &fEFlags, &TestVal, paTests[iTest].bImm);
9857 if ( fEFlags != paTests[iTest].fEFlagsOut
9858 || OutVal.s.Hi != paTests[iTest].OutVal.uXmm.s.Hi
9859 || OutVal.s.Lo != paTests[iTest].OutVal.uXmm.s.Lo)
9860 RTTestFailed(g_hTest, "#%04u%s: efl=%#08x in1=%s rax1=%RI64 in2=%s rdx2=%RI64 bImm=%#x\n"
9861 "%s -> efl=%#08x %s\n"
9862 "%s expected %#08x %s%s%s\n",
9863 iTest, iVar ? "/n" : "", paTests[iTest].fEFlagsIn,
9864 FormatU128(&paTests[iTest].InVal1.uXmm), paTests[iTest].u64Rax,
9865 FormatU128(&paTests[iTest].InVal2.uXmm), paTests[iTest].u64Rdx,
9866 paTests[iTest].bImm,
9867 iVar ? " " : "", fEFlags, FormatU128(&OutVal),
9868 iVar ? " " : "", paTests[iTest].fEFlagsOut, FormatU128(&paTests[iTest].OutVal.uXmm),
9869 EFlagsDiff(fEFlags, paTests[iTest].fEFlagsOut),
9870 ( OutVal.s.Hi != paTests[iTest].OutVal.uXmm.s.Hi
9871 || OutVal.s.Lo != paTests[iTest].OutVal.uXmm.s.Lo) ? " - val" : "");
9872 }
9873 }
9874
9875 FREE_DECOMPRESSED_TESTS(g_aSsePcmpestrm[iFn]);
9876 }
9877}
9878
9879
9880
9881int main(int argc, char **argv)
9882{
9883 int rc = RTR3InitExe(argc, &argv, 0);
9884 if (RT_FAILURE(rc))
9885 return RTMsgInitFailure(rc);
9886
9887 /*
9888 * Determine the host CPU.
9889 * If not using the IEMAllAImpl.asm code, this will be set to Intel.
9890 */
9891#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
9892 g_idxCpuEflFlavour = ASMIsAmdCpu() || ASMIsHygonCpu()
9893 ? IEMTARGETCPU_EFL_BEHAVIOR_AMD
9894 : IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
9895#else
9896 g_idxCpuEflFlavour = IEMTARGETCPU_EFL_BEHAVIOR_INTEL;
9897#endif
9898
9899 /*
9900 * Parse arguments.
9901 */
9902 enum { kModeNotSet, kModeTest, kModeGenerate, kModeDump }
9903 enmMode = kModeNotSet;
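/* Test category selection flags, combined into fCategories by the option handlers below. */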
9904#define CATEGORY_INT RT_BIT_32(0)
9905#define CATEGORY_FPU_LD_ST RT_BIT_32(1)
9906#define CATEGORY_FPU_BINARY_1 RT_BIT_32(2)
9907#define CATEGORY_FPU_BINARY_2 RT_BIT_32(3)
9908#define CATEGORY_FPU_OTHER RT_BIT_32(4)
9909#define CATEGORY_SSE_FP_BINARY RT_BIT_32(5)
9910#define CATEGORY_SSE_FP_OTHER RT_BIT_32(6)
9911#define CATEGORY_SSE_PCMPXSTRX RT_BIT_32(7)
9912 uint32_t fCategories = UINT32_MAX;
9913 bool fCpuData = true;
9914 bool fCommonData = true;
9915 uint32_t const cDefaultTests = 96;
9916 uint32_t cTests = cDefaultTests;
9917 RTGETOPTDEF const s_aOptions[] =
9918 {
9919 // mode:
9920 { "--generate", 'g', RTGETOPT_REQ_NOTHING },
9921 { "--dump", 'G', RTGETOPT_REQ_NOTHING },
9922 { "--test", 't', RTGETOPT_REQ_NOTHING },
9923 { "--benchmark", 'b', RTGETOPT_REQ_NOTHING },
9924 // test selection (both)
9925 { "--all", 'a', RTGETOPT_REQ_NOTHING },
9926 { "--none", 'z', RTGETOPT_REQ_NOTHING },
9927 { "--zap", 'z', RTGETOPT_REQ_NOTHING },
9928 { "--fpu-ld-st", 'F', RTGETOPT_REQ_NOTHING }, /* FPU stuff is upper case */
9929 { "--fpu-load-store", 'F', RTGETOPT_REQ_NOTHING },
9930 { "--fpu-binary-1", 'B', RTGETOPT_REQ_NOTHING },
9931 { "--fpu-binary-2", 'P', RTGETOPT_REQ_NOTHING },
9932 { "--fpu-other", 'O', RTGETOPT_REQ_NOTHING },
9933 { "--sse-fp-binary", 'S', RTGETOPT_REQ_NOTHING },
9934 { "--sse-fp-other", 'T', RTGETOPT_REQ_NOTHING },
9935 { "--sse-pcmpxstrx", 'C', RTGETOPT_REQ_NOTHING },
9936 { "--int", 'i', RTGETOPT_REQ_NOTHING },
9937 { "--include", 'I', RTGETOPT_REQ_STRING },
9938 { "--exclude", 'X', RTGETOPT_REQ_STRING },
9939 // generation parameters
9940 { "--common", 'm', RTGETOPT_REQ_NOTHING },
9941 { "--cpu", 'c', RTGETOPT_REQ_NOTHING },
9942 { "--number-of-tests", 'n', RTGETOPT_REQ_UINT32 },
9943 { "--verbose", 'v', RTGETOPT_REQ_NOTHING },
9944 { "--quiet", 'q', RTGETOPT_REQ_NOTHING },
9945 };
9946
9947 RTGETOPTSTATE State;
9948 rc = RTGetOptInit(&State, argc, argv, s_aOptions, RT_ELEMENTS(s_aOptions), 1, 0);
9949 AssertRCReturn(rc, RTEXITCODE_FAILURE);
9950
9951 RTGETOPTUNION ValueUnion;
9952 while ((rc = RTGetOpt(&State, &ValueUnion)))
9953 {
9954 switch (rc)
9955 {
9956 case 'g':
9957 enmMode = kModeGenerate;
9958 g_cPicoSecBenchmark = 0;
9959 break;
9960 case 'G':
9961 enmMode = kModeDump;
9962 g_cPicoSecBenchmark = 0;
9963 break;
9964 case 't':
9965 enmMode = kModeTest;
9966 g_cPicoSecBenchmark = 0;
9967 break;
9968 case 'b':
9969 enmMode = kModeTest;
9970 g_cPicoSecBenchmark += RT_NS_1SEC / 2 * UINT64_C(1000); /* half a second in picoseconds */
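                /* Each additional -b adds another 0.5 s, e.g. '-b -b -b' asks for 1.5 s of benchmarking. */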
9971 break;
9972
9973 case 'a':
9974 fCpuData = true;
9975 fCommonData = true;
9976 fCategories = UINT32_MAX;
9977 break;
9978 case 'z':
9979 fCpuData = false;
9980 fCommonData = false;
9981 fCategories = 0;
9982 break;
9983
9984 case 'F':
9985 fCategories |= CATEGORY_FPU_LD_ST;
9986 break;
9987 case 'O':
9988 fCategories |= CATEGORY_FPU_OTHER;
9989 break;
9990 case 'B':
9991 fCategories |= CATEGORY_FPU_BINARY_1;
9992 break;
9993 case 'P':
9994 fCategories |= CATEGORY_FPU_BINARY_2;
9995 break;
9996 case 'S':
9997 fCategories |= CATEGORY_SSE_FP_BINARY;
9998 break;
9999 case 'T':
10000 fCategories |= CATEGORY_SSE_FP_OTHER;
10001 break;
10002 case 'C':
10003 fCategories |= CATEGORY_SSE_PCMPXSTRX;
10004 break;
10005 case 'i':
10006 fCategories |= CATEGORY_INT;
10007 break;
10008
10009 case 'I':
10010 if (g_cIncludeTestPatterns >= RT_ELEMENTS(g_apszIncludeTestPatterns))
10011 return RTMsgErrorExit(RTEXITCODE_SYNTAX, "Too many include patterns (max %zu)",
10012 RT_ELEMENTS(g_apszIncludeTestPatterns));
10013 g_apszIncludeTestPatterns[g_cIncludeTestPatterns++] = ValueUnion.psz;
10014 break;
10015 case 'X':
10016 if (g_cExcludeTestPatterns >= RT_ELEMENTS(g_apszExcludeTestPatterns))
10017 return RTMsgErrorExit(RTEXITCODE_SYNTAX, "Too many exclude patterns (max %zu)",
10018 RT_ELEMENTS(g_apszExcludeTestPatterns));
10019 g_apszExcludeTestPatterns[g_cExcludeTestPatterns++] = ValueUnion.psz;
10020 break;
10021
10022 case 'm':
10023 fCommonData = true;
10024 break;
10025 case 'c':
10026 fCpuData = true;
10027 break;
10028 case 'n':
10029 cTests = ValueUnion.u32;
10030 break;
10031
10032 case 'q':
10033 g_cVerbosity = 0;
10034 break;
10035 case 'v':
10036 g_cVerbosity++;
10037 break;
10038
10039 case 'h':
10040 RTPrintf("usage: %s <-g|-t> [options]\n"
10041 "\n"
10042 "Mode:\n"
10043 " -g, --generate\n"
10044 " Generate test data.\n"
10045 " -t, --test\n"
10046 " Execute tests.\n"
10047 " -b, --benchmark\n"
10048 " Execute tests and do 0.5 seconds of benchmarking.\n"
10049 " Repeating the option increases the benchmark duration by 0.5 seconds.\n"
10050 "\n"
10051 "Test selection (both modes):\n"
10052 " -a, --all\n"
10053 " Enable all tests and generated test data. (default)\n"
10054 " -z, --zap, --none\n"
10055 " Disable all tests and test data types.\n"
10056 " -i, --int\n"
10057 " Enable non-FPU tests.\n"
10058 " -F, --fpu-ld-st\n"
10059 " Enable FPU load and store tests.\n"
10060 " -B, --fpu-binary-1\n"
10061 " Enable FPU binary 80-bit FP tests.\n"
10062 " -P, --fpu-binary-2\n"
10063 " Enable FPU binary 64- and 32-bit FP tests.\n"
10064 " -O, --fpu-other\n"
10065 " Enable other FPU tests.\n"
10066 " -S, --sse-fp-binary\n"
10067 " Enable SSE binary 64- and 32-bit FP tests.\n"
10068 " -T, --sse-fp-other\n"
10069 " Enable misc SSE 64- and 32-bit FP tests.\n"
10070 " -C, --sse-pcmpxstrx\n"
10071 " Enable SSE pcmpxstrx tests.\n"
10072 " -I, --include=<test-pattern>\n"
10073 " Enable tests matching the given pattern.\n"
10074 " -X, --exclude=<test-pattern>\n"
10075 " Skip tests matching the given pattern (overrides --include).\n"
10076 "\n"
10077 "Generation:\n"
10078 " -m, --common\n"
10079 " Enable generating common test data.\n"
10080 " -c, --cpu\n"
10081 " Enable generating CPU-specific test data.\n"
10082 " -n, --number-of-tests <count>\n"
10083 " Number of tests to generate. Default: %u\n"
10084 "\n"
10085 "Other:\n"
10086 " -v, --verbose\n"
10087 " -q, --quiet\n"
10088 " Noise level. Default: --quiet\n"
10089 , argv[0], cDefaultTests);
10090 return RTEXITCODE_SUCCESS;
10091 default:
10092 return RTGetOptPrintError(rc, &ValueUnion);
10093 }
10094 }
10095
10096 static const struct
10097 {
10098 uint32_t fCategory;
10099 void (*pfnTest)(void);
10100#ifdef TSTIEMAIMPL_WITH_GENERATOR
10101 const char *pszFilenameFmt;
10102 RTEXITCODE (*pfnGenerate)(uint32_t cTests, const char * const *papszNameFmts);
10103 RTEXITCODE (*pfnDumpAll)(const char * const *papszNameFmts);
10104 uint32_t cMinTests;
10105# define GROUP_ENTRY(a_fCategory, a_BaseNm, a_szFilenameFmt, a_cMinTests) \
10106 { a_fCategory, a_BaseNm ## Test, a_szFilenameFmt, a_BaseNm ## Generate, a_BaseNm ## DumpAll, a_cMinTests }
10107#else
10108# define GROUP_ENTRY(a_fCategory, a_BaseNm, a_szFilenameFmt, a_cMinTests) \
10109 { a_fCategory, a_BaseNm ## Test }
10110#endif
10111#define GROUP_ENTRY_MANUAL(a_fCategory, a_BaseNm) \
10112 { a_fCategory, a_BaseNm ## Test }
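   /* For illustration: with the generator compiled in, GROUP_ENTRY(CATEGORY_INT, BinU8,
      "tstIEMAImplDataInt-%s.bin.gz", 0) expands to
          { CATEGORY_INT, BinU8Test, "tstIEMAImplDataInt-%s.bin.gz", BinU8Generate, BinU8DumpAll, 0 },
      while without it (and for GROUP_ENTRY_MANUAL) only { CATEGORY_INT, BinU8Test } remains. */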
10113 } s_aGroups[] =
10114 {
10115 GROUP_ENTRY(CATEGORY_INT, BinU8, "tstIEMAImplDataInt-%s.bin.gz", 0),
10116 GROUP_ENTRY(CATEGORY_INT, BinU16, "tstIEMAImplDataInt-%s.bin.gz", 0),
10117 GROUP_ENTRY(CATEGORY_INT, BinU32, "tstIEMAImplDataInt-%s.bin.gz", 0),
10118 GROUP_ENTRY(CATEGORY_INT, BinU64, "tstIEMAImplDataInt-%s.bin.gz", 0),
10119 GROUP_ENTRY(CATEGORY_INT, ShiftDbl, "tstIEMAImplDataInt-%s.bin.gz", 128),
10120 GROUP_ENTRY(CATEGORY_INT, Unary, "tstIEMAImplDataInt-%s.bin.gz", 0),
10121 GROUP_ENTRY(CATEGORY_INT, Shift, "tstIEMAImplDataInt-%s.bin.gz", 0),
10122 GROUP_ENTRY(CATEGORY_INT, MulDiv, "tstIEMAImplDataInt-%s.bin.gz", 0),
10123 GROUP_ENTRY_MANUAL(CATEGORY_INT, Xchg),
10124 GROUP_ENTRY_MANUAL(CATEGORY_INT, Xadd),
10125 GROUP_ENTRY_MANUAL(CATEGORY_INT, CmpXchg),
10126 GROUP_ENTRY_MANUAL(CATEGORY_INT, CmpXchg8b),
10127 GROUP_ENTRY_MANUAL(CATEGORY_INT, CmpXchg16b),
10128 GROUP_ENTRY_MANUAL(CATEGORY_INT, Bswap),
10129
10130 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuLdConst, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 0),
10131 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuLdInt, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 0),
10132 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuLdD80, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 0),
10133 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuLdMem, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 384), /* needs better coverage */
10134
10135 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuStInt, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 0),
10136 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuStD80, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 0),
10137 GROUP_ENTRY(CATEGORY_FPU_LD_ST, FpuStMem, "tstIEMAImplDataFpuLdSt-%s.bin.gz", 384), /* needs better coverage */
10138
10139 GROUP_ENTRY(CATEGORY_FPU_BINARY_1, FpuBinaryR80, "tstIEMAImplDataFpuBinary1-%s.bin.gz", 0),
10140 GROUP_ENTRY(CATEGORY_FPU_BINARY_1, FpuBinaryFswR80, "tstIEMAImplDataFpuBinary1-%s.bin.gz", 0),
10141 GROUP_ENTRY(CATEGORY_FPU_BINARY_1, FpuBinaryEflR80, "tstIEMAImplDataFpuBinary1-%s.bin.gz", 0),
10142
10143 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryR64, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10144 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryR32, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10145 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryI32, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10146 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryI16, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10147
10148 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryFswR64, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10149 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryFswR32, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10150 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryFswI32, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10151 GROUP_ENTRY(CATEGORY_FPU_BINARY_2, FpuBinaryFswI16, "tstIEMAImplDataFpuBinary2-%s.bin.gz", 0),
10152
10153 GROUP_ENTRY(CATEGORY_FPU_OTHER, FpuUnaryR80, "tstIEMAImplDataFpuOther-%s.bin.gz", 0),
10154 GROUP_ENTRY(CATEGORY_FPU_OTHER, FpuUnaryFswR80, "tstIEMAImplDataFpuOther-%s.bin.gz", 0),
10155 GROUP_ENTRY(CATEGORY_FPU_OTHER, FpuUnaryTwoR80, "tstIEMAImplDataFpuOther-%s.bin.gz", 0),
10156
10157 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryR32, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10158 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryR64, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10159 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryU128R32, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10160 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryU128R64, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10161
10162 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryI32R64, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10163 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryI64R64, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10164 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryI32R32, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10165 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryI64R32, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10166
10167 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryR64I32, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10168 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryR64I64, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10169 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryR32I32, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10170 GROUP_ENTRY(CATEGORY_SSE_FP_BINARY, SseBinaryR32I64, "tstIEMAImplDataSseBinary-%s.bin.gz", 0),
10171
10172 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseCompareEflR32R32, "tstIEMAImplDataSseCompare-%s.bin.gz", 0),
10173 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseCompareEflR64R64, "tstIEMAImplDataSseCompare-%s.bin.gz", 0),
10174 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseCompareF2XmmR32Imm8, "tstIEMAImplDataSseCompare-%s.bin.gz", 0),
10175 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseCompareF2XmmR64Imm8, "tstIEMAImplDataSseCompare-%s.bin.gz", 0),
10176
10177 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertXmmI32R32, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10178 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertXmmR32I32, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10179 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertXmmI32R64, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10180 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertXmmR64I32, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10181 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertMmXmm, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10182 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertXmmR32Mm, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10183 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertXmmR64Mm, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10184 GROUP_ENTRY(CATEGORY_SSE_FP_OTHER, SseConvertMmI32XmmR32, "tstIEMAImplDataSseConvert-%s.bin.gz", 0),
10185
10186 GROUP_ENTRY(CATEGORY_SSE_PCMPXSTRX, SseComparePcmpistri, "tstIEMAImplDataSsePcmpxstrx-%s.bin.gz", 0),
10187 GROUP_ENTRY(CATEGORY_SSE_PCMPXSTRX, SseComparePcmpistrm, "tstIEMAImplDataSsePcmpxstrx-%s.bin.gz", 0),
10188 GROUP_ENTRY(CATEGORY_SSE_PCMPXSTRX, SseComparePcmpestri, "tstIEMAImplDataSsePcmpxstrx-%s.bin.gz", 0),
10189 GROUP_ENTRY(CATEGORY_SSE_PCMPXSTRX, SseComparePcmpestrm, "tstIEMAImplDataSsePcmpxstrx-%s.bin.gz", 0),
10190 };
10191
10192 /*
10193 * Generate data?
10194 */
10195 if (enmMode == kModeGenerate)
10196 {
10197#ifdef TSTIEMAIMPL_WITH_GENERATOR
10198 if (cTests == 0)
10199 cTests = cDefaultTests;
10200 g_cZeroDstTests = RT_MIN(cTests / 16, 32);
10201 g_cZeroSrcTests = g_cZeroDstTests * 2;
10202
10203 RTMpGetDescription(NIL_RTCPUID, g_szCpuDesc, sizeof(g_szCpuDesc));
10204
10205 /* For the revision, use the highest of this file and VBoxRT. */
10206 static const char s_szRev[] = "$Revision: 103100 $";
10207 const char *pszRev = s_szRev;
10208 while (*pszRev && !RT_C_IS_DIGIT(*pszRev))
10209 pszRev++;
10210 g_uSvnRev = RTStrToUInt32(pszRev);
10211 g_uSvnRev = RT_MAX(g_uSvnRev, RTBldCfgRevision());
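        /* Example: with s_szRev above pszRev ends up pointing at "103100 $", so
           RTStrToUInt32() yields 103100, and RT_MAX() keeps whichever of the file
           revision and the VBoxRT build revision is higher. */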
10212
10213 /* Loop thru the groups and call the generator for any that's enabled. */
10214 for (size_t i = 0; i < RT_ELEMENTS(s_aGroups); i++)
10215 if ((s_aGroups[i].fCategory & fCategories) && s_aGroups[i].pfnGenerate)
10216 {
10217 const char * const apszNameFmts[] =
10218 {
10219 /*[IEMTARGETCPU_EFL_BEHAVIOR_NATIVE] =*/ fCommonData ? s_aGroups[i].pszFilenameFmt : NULL,
10220 /*[IEMTARGETCPU_EFL_BEHAVIOR_INTEL] =*/ fCpuData ? s_aGroups[i].pszFilenameFmt : NULL,
10221 /*[IEMTARGETCPU_EFL_BEHAVIOR_AMD] =*/ fCpuData ? s_aGroups[i].pszFilenameFmt : NULL,
10222 };
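                /* A NULL slot here is taken to mean the corresponding data set is left
                   untouched, which is how --common and --cpu limit what gets (re)written. */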
10223 RTEXITCODE rcExit = s_aGroups[i].pfnGenerate(RT_MAX(cTests, s_aGroups[i].cMinTests), apszNameFmts);
10224 if (rcExit != RTEXITCODE_SUCCESS)
10225 return rcExit;
10226 }
10227 return RTEXITCODE_SUCCESS;
10228#else
10229 return RTMsgErrorExitFailure("Test data generator not compiled in!");
10230#endif
10231 }
10232
10233 /*
10234 * Dump tables (used for the conversion, mostly useless now).
10235 */
10236 if (enmMode == kModeDump)
10237 {
10238#ifdef TSTIEMAIMPL_WITH_GENERATOR
10239 /* Loop thru the groups and call the dump-all worker for any that's enabled. */
10240 for (size_t i = 0; i < RT_ELEMENTS(s_aGroups); i++)
10241 if ((s_aGroups[i].fCategory & fCategories) && s_aGroups[i].pfnDumpAll)
10242 {
10243 const char * const apszNameFmts[] =
10244 {
10245 /*[IEMTARGETCPU_EFL_BEHAVIOR_NATIVE] =*/ fCommonData ? s_aGroups[i].pszFilenameFmt : NULL,
10246 /*[IEMTARGETCPU_EFL_BEHAVIOR_INTEL] =*/ fCpuData ? s_aGroups[i].pszFilenameFmt : NULL,
10247 /*[IEMTARGETCPU_EFL_BEHAVIOR_AMD] =*/ fCpuData ? s_aGroups[i].pszFilenameFmt : NULL,
10248 };
10249 RTEXITCODE rcExit = s_aGroups[i].pfnDumpAll(apszNameFmts);
10250 if (rcExit != RTEXITCODE_SUCCESS)
10251 return rcExit;
10252 }
10253 return RTEXITCODE_SUCCESS;
10254#else
10255 return RTMsgErrorExitFailure("Test data generator not compiled in!");
10256#endif
10257 }
10258
10259
10260 /*
10261 * Do testing. Currently disabled by default as data needs to be checked
10262 * on both Intel and AMD systems first.
10263 */
10264 rc = RTTestCreate("tstIEMAImpl", &g_hTest);
10265 AssertRCReturn(rc, RTEXITCODE_FAILURE);
10266 if (enmMode == kModeTest)
10267 {
10268 RTTestBanner(g_hTest);
10269
10270 /* Allocate guarded memory for use in the tests. */
10271#define ALLOC_GUARDED_VAR(a_puVar) do { \
10272 rc = RTTestGuardedAlloc(g_hTest, sizeof(*a_puVar), sizeof(*a_puVar), false /*fHead*/, (void **)&a_puVar); \
10273 if (RT_FAILURE(rc)) RTTestFailed(g_hTest, "Failed to allocate guarded mem: " #a_puVar); \
10274 } while (0)
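      /* With fHead=false the variable is placed right below the tail guard page, so
         (assuming the usual RTTestGuardedAlloc layout) any write past its end faults. */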
10275 ALLOC_GUARDED_VAR(g_pu8);
10276 ALLOC_GUARDED_VAR(g_pu16);
10277 ALLOC_GUARDED_VAR(g_pu32);
10278 ALLOC_GUARDED_VAR(g_pu64);
10279 ALLOC_GUARDED_VAR(g_pu128);
10280 ALLOC_GUARDED_VAR(g_pu8Two);
10281 ALLOC_GUARDED_VAR(g_pu16Two);
10282 ALLOC_GUARDED_VAR(g_pu32Two);
10283 ALLOC_GUARDED_VAR(g_pu64Two);
10284 ALLOC_GUARDED_VAR(g_pu128Two);
10285 ALLOC_GUARDED_VAR(g_pfEfl);
10286 if (RTTestErrorCount(g_hTest) == 0)
10287 {
10288 /* Loop thru the groups and call the test function for anything that's enabled. */
10289 for (size_t i = 0; i < RT_ELEMENTS(s_aGroups); i++)
10290 if ((s_aGroups[i].fCategory & fCategories))
10291 s_aGroups[i].pfnTest();
10292 }
10293 return RTTestSummaryAndDestroy(g_hTest);
10294 }
10295 return RTTestSkipAndDestroy(g_hTest, "unfinished testcase");
10296}
10297