+ constexpr uint16_t k_expWriteMask = 0xFFFF;
+ _mm512_mask_cvtepi32_storeu_epi8(expStore, k_expWriteMask, exponent);
+}
+
+
/// Pack compressed 9 bit data in network byte order
/// See https://soco.intel.com/docs/DOC-2665619
__m512i
networkBytePack9b(const __m512i compData)
{
    /// Per-sample left shifts that line each 9b value up on the byte
    /// boundary it occupies in the network-order output
    const __m512i k_shiftCtrl = _mm512_set_epi64(0x0000000100020003, 0x0004000500060007,
                                                 0x0000000100020003, 0x0004000500060007,
                                                 0x0000000100020003, 0x0004000500060007,
                                                 0x0000000100020003, 0x0004000500060007);
    const __m512i byteAligned = _mm512_sllv_epi16(compData, k_shiftCtrl);

    /// Gather the output bytes contributed by the even indexed samples,
    /// zeroing every destination byte the write mask does not select
    const __m512i k_evenShuffle = _mm512_set_epi64(0x0000000000000000, 0x0C0D080904050001,
                                                   0x0000000000000000, 0x0C0D080904050001,
                                                   0x0000000000000000, 0x0C0D080904050001,
                                                   0x0000000000000000, 0x0C0D080904050001);
    constexpr uint64_t k_evenWriteMask = 0x000000FF00FF00FF;
    const __m512i evenSampleBytes = _mm512_maskz_shuffle_epi8(k_evenWriteMask, byteAligned, k_evenShuffle);

    /// Gather the output bytes contributed by the odd indexed samples
    const __m512i k_oddShuffle = _mm512_set_epi64(0x000000000000000E, 0x0F0A0B0607020300,
                                                  0x000000000000000E, 0x0F0A0B0607020300,
                                                  0x000000000000000E, 0x0F0A0B0607020300,
                                                  0x000000000000000E, 0x0F0A0B0607020300);
    constexpr uint64_t k_oddWriteMask = 0x000001FE01FE01FE;
    const __m512i oddSampleBytes = _mm512_maskz_shuffle_epi8(k_oddWriteMask, byteAligned, k_oddShuffle);

    /// Bitwise merge of the two partial results: imm8 0xd8 encodes C ? B : A,
    /// so bits set in k_mergeSelect come from the odd-sample shuffle and the
    /// remaining bits from the even-sample shuffle
    const __m512i k_mergeSelect = _mm512_set_epi64(0x00000000000000FF, 0x01FC07F01FC07F00,
                                                   0x00000000000000FF, 0x01FC07F01FC07F00,
                                                   0x00000000000000FF, 0x01FC07F01FC07F00,
                                                   0x00000000000000FF, 0x01FC07F01FC07F00);
    return _mm512_ternarylogic_epi64(evenSampleBytes, oddSampleBytes, k_mergeSelect, 0xd8);
}
+
+
/// Pack compressed 10 bit data in network byte order
/// See https://soco.intel.com/docs/DOC-2665619
__m512i
networkBytePack10b(const __m512i compData)
{
    /// Per-sample left shifts to byte-align each 10b value; every 64b lane
    /// uses the same pattern, so a single qword broadcast suffices
    const __m512i k_shiftCtrl = _mm512_set1_epi64(0x0000000200040006);
    const __m512i byteAligned = _mm512_sllv_epi16(compData, k_shiftCtrl);

    /// Gather the output bytes contributed by the even indexed samples,
    /// zeroing every destination byte the write mask does not select
    const __m512i k_evenShuffle = _mm512_set_epi64(0x000000000000000C, 0x0D08090004050001,
                                                   0x000000000000000C, 0x0D08090004050001,
                                                   0x000000000000000C, 0x0D08090004050001,
                                                   0x000000000000000C, 0x0D08090004050001);
    constexpr uint64_t k_evenWriteMask = 0x000001EF01EF01EF;
    const __m512i evenSampleBytes = _mm512_maskz_shuffle_epi8(k_evenWriteMask, byteAligned, k_evenShuffle);

    /// Gather the output bytes contributed by the odd indexed samples
    const __m512i k_oddShuffle = _mm512_set_epi64(0x0000000000000E0F, 0x0A0B000607020300,
                                                  0x0000000000000E0F, 0x0A0B000607020300,
                                                  0x0000000000000E0F, 0x0A0B000607020300,
                                                  0x0000000000000E0F, 0x0A0B000607020300);
    constexpr uint64_t k_oddWriteMask = 0x000003DE03DE03DE;
    const __m512i oddSampleBytes = _mm512_maskz_shuffle_epi8(k_oddWriteMask, byteAligned, k_oddShuffle);

    /// Bitwise merge of the two partial results: imm8 0xd8 encodes C ? B : A,
    /// so bits set in k_mergeSelect come from the odd-sample shuffle and the
    /// remaining bits from the even-sample shuffle
    const __m512i k_mergeSelect = _mm512_set_epi64(0x000000000000FF03, 0xF03F00FF03F03F00,
                                                   0x000000000000FF03, 0xF03F00FF03F03F00,
                                                   0x000000000000FF03, 0xF03F00FF03F03F00,
                                                   0x000000000000FF03, 0xF03F00FF03F03F00);
    return _mm512_ternarylogic_epi64(evenSampleBytes, oddSampleBytes, k_mergeSelect, 0xd8);
}
+
+
/// Pack compressed 12 bit data in network byte order
/// See https://soco.intel.com/docs/DOC-2665619
__m512i
networkBytePack12b(const __m512i compData)
{
    /// Per-sample left shifts to byte-align each 12b value; every 64b lane
    /// uses the same pattern, so a single qword broadcast suffices
    const __m512i k_shiftCtrl = _mm512_set1_epi64(0x0000000400000004);
    const __m512i byteAligned = _mm512_sllv_epi16(compData, k_shiftCtrl);

    /// Gather the output bytes contributed by the even indexed samples,
    /// zeroing every destination byte the write mask does not select
    const __m512i k_evenShuffle = _mm512_set_epi64(0x00000000000C0D00, 0x0809000405000001,
                                                   0x00000000000C0D00, 0x0809000405000001,
                                                   0x00000000000C0D00, 0x0809000405000001,
                                                   0x00000000000C0D00, 0x0809000405000001);
    constexpr uint64_t k_evenWriteMask = 0x000006DB06DB06DB;
    const __m512i evenSampleBytes = _mm512_maskz_shuffle_epi8(k_evenWriteMask, byteAligned, k_evenShuffle);

    /// Gather the output bytes contributed by the odd indexed samples
    const __m512i k_oddShuffle = _mm512_set_epi64(0x000000000E0F000A, 0x0B00060700020300,
                                                  0x000000000E0F000A, 0x0B00060700020300,
                                                  0x000000000E0F000A, 0x0B00060700020300,
                                                  0x000000000E0F000A, 0x0B00060700020300);
    constexpr uint64_t k_oddWriteMask = 0x00000DB60DB60DB6;
    const __m512i oddSampleBytes = _mm512_maskz_shuffle_epi8(k_oddWriteMask, byteAligned, k_oddShuffle);

    /// Bitwise merge of the two partial results: imm8 0xd8 encodes C ? B : A,
    /// so bits set in k_mergeSelect come from the odd-sample shuffle and the
    /// remaining bits from the even-sample shuffle
    const __m512i k_mergeSelect = _mm512_set_epi64(0x00000000FF0F00FF, 0x0F00FF0F00FF0F00,
                                                   0x00000000FF0F00FF, 0x0F00FF0F00FF0F00,
                                                   0x00000000FF0F00FF, 0x0F00FF0F00FF0F00,
                                                   0x00000000FF0F00FF, 0x0F00FF0F00FF0F00);
    return _mm512_ternarylogic_epi64(evenSampleBytes, oddSampleBytes, k_mergeSelect, 0xd8);
}
+
+
/// Unpack compressed 9 bit data in network byte order
/// See https://soco.intel.com/docs/DOC-2665619
/// @param[in] inData Pointer to packed network-order 9b sample bytes.
///                   No alignment requirement is placed on the pointer.
/// @return Vector of 16b values, each with its 9b sample left-aligned in
///         the most significant bits and the low bits zeroed (see 0xFF80
///         mask below).
__m512i
networkByteUnpack9b(const uint8_t* inData)
{
  /// Align chunks of compressed bytes into lanes to allow for expansion.
  /// Fix: inData carries no 64B alignment guarantee, so use an explicit
  /// unaligned load. Dereferencing a reinterpret_cast<const __m512i*>
  /// implies an aligned access (vmovdqa64) and is UB / may fault when the
  /// buffer is not 64B aligned.
  const __m512i rawDataIn = _mm512_loadu_si512(inData);
  const auto k_expPerm = _mm512_set_epi32(15, 14, 13, 12, 7, 6, 5, 4,
                                          5, 4, 3, 2, 3, 2, 1, 0);
  auto expData = _mm512_permutexvar_epi32(k_expPerm, rawDataIn);

  /// Byte shuffle to get all bits for each sample into 16b chunks
  /// Due to previous permute to get chunks of bytes into each lane, there is
  /// a different shuffle offset in each lane
  const __m512i k_byteShuffleMask = _mm512_set_epi64(0x0F0E0D0C0B0A0908, 0x0706050403020100,
                                                     0x090A080907080607, 0x0506040503040203,
                                                     0x0809070806070506, 0x0405030402030102,
                                                     0x0708060705060405, 0x0304020301020001);
  expData = _mm512_shuffle_epi8(expData, k_byteShuffleMask);

  /// Logical shift left to set sign bit (per-sample shift counts)
  const __m512i k_slBits = _mm512_set_epi64(0x0007000600050004, 0x0003000200010000,
                                            0x0007000600050004, 0x0003000200010000,
                                            0x0007000600050004, 0x0003000200010000,
                                            0x0007000600050004, 0x0003000200010000);
  expData = _mm512_sllv_epi16(expData, k_slBits);

  /// Mask to zero unwanted bits, keeping the 9 MSBs of each 16b value
  /// (explicit cast avoids the implicit int -> short narrowing of 0xFF80)
  const __m512i k_expMask = _mm512_set1_epi16(static_cast<int16_t>(0xFF80));
  return _mm512_and_epi64(expData, k_expMask);
}
+
+
/// Unpack compressed 10 bit data in network byte order
/// See https://soco.intel.com/docs/DOC-2665619
/// @param[in] inData Pointer to packed network-order 10b sample bytes.
///                   No alignment requirement is placed on the pointer.
/// @return Vector of 16b values, each with its 10b sample left-aligned in
///         the most significant bits and the low bits zeroed (see 0xFFC0
///         mask below).
__m512i
networkByteUnpack10b(const uint8_t* inData)
{
  /// Align chunks of compressed bytes into lanes to allow for expansion.
  /// Fix: inData carries no 64B alignment guarantee, so use an explicit
  /// unaligned load. Dereferencing a reinterpret_cast<const __m512i*>
  /// implies an aligned access (vmovdqa64) and is UB / may fault when the
  /// buffer is not 64B aligned.
  const __m512i rawDataIn = _mm512_loadu_si512(inData);
  const auto k_expPerm = _mm512_set_epi32(15, 14, 13, 12, 8, 7, 6, 5,
                                          5, 4, 3, 2, 3, 2, 1, 0);
  auto expData = _mm512_permutexvar_epi32(k_expPerm, rawDataIn);

  /// Byte shuffle to get all bits for each sample into 16b chunks
  /// Due to previous permute to get chunks of bytes into each lane, lanes
  /// 0 and 2 happen to be aligned, but lane 1 is offset by 2 bytes
  const __m512i k_byteShuffleMask = _mm512_set_epi64(0x0809070806070506, 0x0304020301020001,
                                                     0x0809070806070506, 0x0304020301020001,
                                                     0x0A0B090A08090708, 0x0506040503040203,
                                                     0x0809070806070506, 0x0304020301020001);
  expData = _mm512_shuffle_epi8(expData, k_byteShuffleMask);

  /// Logical shift left to set sign bit (per-sample shift counts)
  const __m512i k_slBits = _mm512_set_epi64(0x0006000400020000, 0x0006000400020000,
                                            0x0006000400020000, 0x0006000400020000,
                                            0x0006000400020000, 0x0006000400020000,
                                            0x0006000400020000, 0x0006000400020000);
  expData = _mm512_sllv_epi16(expData, k_slBits);

  /// Mask to zero unwanted bits, keeping the 10 MSBs of each 16b value
  /// (explicit cast avoids the implicit int -> short narrowing of 0xFFC0)
  const __m512i k_expMask = _mm512_set1_epi16(static_cast<int16_t>(0xFFC0));
  return _mm512_and_epi64(expData, k_expMask);
}
+
+
/// Unpack compressed 12 bit data in network byte order
/// See https://soco.intel.com/docs/DOC-2665619
/// @param[in] inData Pointer to packed network-order 12b sample bytes.
///                   No alignment requirement is placed on the pointer.
/// @return Vector of 16b values, each with its 12b sample left-aligned in
///         the most significant bits and the low bits zeroed (see 0xFFF0
///         mask below).
__m512i
networkByteUnpack12b(const uint8_t* inData)
{
  /// Align chunks of compressed bytes into lanes to allow for expansion.
  /// Fix: inData carries no 64B alignment guarantee, so use an explicit
  /// unaligned load. Dereferencing a reinterpret_cast<const __m512i*>
  /// implies an aligned access (vmovdqa64) and is UB / may fault when the
  /// buffer is not 64B aligned.
  const __m512i rawDataIn = _mm512_loadu_si512(inData);
  const auto k_expPerm = _mm512_set_epi32(15, 14, 13, 12, 9, 8, 7, 6,
                                          6, 5, 4, 3, 3, 2, 1, 0);
  auto expData = _mm512_permutexvar_epi32(k_expPerm, rawDataIn);

  /// Byte shuffle to get all bits for each sample into 16b chunks
  /// For 12b mantissa all lanes post-permute are aligned and require same shuffle offset
  const __m512i k_byteShuffleMask = _mm512_set_epi64(0x0A0B090A07080607, 0x0405030401020001,
                                                     0x0A0B090A07080607, 0x0405030401020001,
                                                     0x0A0B090A07080607, 0x0405030401020001,
                                                     0x0A0B090A07080607, 0x0405030401020001);
  expData = _mm512_shuffle_epi8(expData, k_byteShuffleMask);

  /// Logical shift left to set sign bit (per-sample shift counts)
  const __m512i k_slBits = _mm512_set_epi64(0x0004000000040000, 0x0004000000040000,
                                            0x0004000000040000, 0x0004000000040000,
                                            0x0004000000040000, 0x0004000000040000,
                                            0x0004000000040000, 0x0004000000040000);
  expData = _mm512_sllv_epi16(expData, k_slBits);

  /// Mask to zero unwanted bits, keeping the 12 MSBs of each 16b value
  /// (explicit cast avoids the implicit int -> short narrowing of 0xFFF0)
  const __m512i k_expMask = _mm512_set1_epi16(static_cast<int16_t>(0xFFF0));
  return _mm512_and_epi64(expData, k_expMask);
}
+
+
+/// 8 bit compression
+void
+BlockFloatCompander::BlockFloatCompress_8b_AVX512(const ExpandedData& dataIn, CompressedData* dataOut)
+{
+ /// Compute exponent and store for later use
+ int8_t storedExp[BlockFloatCompander::k_numRB] = {};
+ computeExponent(dataIn, storedExp);
+
+ /// Shift 1RB by corresponding exponent and write exponent and data to output
+#pragma unroll(BlockFloatCompander::k_numRB)
+ for (int n = 0; n < BlockFloatCompander::k_numRB; ++n)
+ {
+ const __m512i* rawDataIn = reinterpret_cast<const __m512i*>(dataIn.dataExpanded + n * BlockFloatCompander::k_numREReal);
+ auto compData = _mm512_srai_epi16(*rawDataIn, storedExp[n]);
+ auto thisRBExpAddr = n * (BlockFloatCompander::k_numREReal + 1);
+ /// Store exponent first
+ dataOut->dataCompressed[thisRBExpAddr] = storedExp[n];
+ /// Store compressed RB
+ constexpr uint32_t k_rbMask = 0x00FFFFFF; // Write mask for 1RB (24 values)
+ _mm256_mask_storeu_epi8(dataOut->dataCompressed + thisRBExpAddr + 1, k_rbMask, _mm512_cvtepi16_epi8(compData));
+ }
+}
+
+
+/// 9 bit compression
+void
+BlockFloatCompander::BlockFloatCompress_9b_AVX512(const ExpandedData& dataIn, CompressedData* dataOut)
+{
+ /// Compute exponent and store for later use