/******************************************************************************
*
*   Copyright (c) 2020 Intel.
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*       http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*******************************************************************************/

/**
 * @brief xRAN BFP compression/decompression for C-plane with 32T32R
 *
 * @file xran_bfp_cplane32.cpp
 * @ingroup group_source_xran
 * @author Intel Corporation
 **/
#include "xran_compression.hpp"
#include "xran_bfp_utils.hpp"
#include "xran_bfp_byte_packing_utils.hpp"
#include <complex>
#include <algorithm>
#include <immintrin.h>
35 namespace BFP_CPlane_32
37 /// Namespace constants
38 const int k_numDataElements = 64; /// 16 IQ pairs
39 const int k_numRegsPerBlock = 2; /// Number of AVX512 registers per compression block (input)
42 maxAbsOneBlock(const __m512i* inData)
44 /// Vertical maxAbs on all registers
45 __m512i maxAbsReg = __m512i();
46 #pragma unroll(k_numRegsPerBlock)
47 for (int n = 0; n < k_numRegsPerBlock; ++n)
49 const auto thisRegAbs = _mm512_abs_epi16(inData[n]);
50 maxAbsReg = _mm512_max_epi16(thisRegAbs, maxAbsReg);
52 /// Horizontal max across remaining register
53 return BlockFloatCompander::horizontalMax1x32(maxAbsReg);
56 /// Compute exponent value for a set of 16 RB from the maximum absolute value
58 computeExponent_16RB(const BlockFloatCompander::ExpandedData& dataIn, const __m512i totShiftBits)
60 __m512i maxAbs = __m512i();
61 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
63 for (int n = 0; n < 16; ++n)
65 ((uint32_t*)&maxAbs)[n] = maxAbsOneBlock(dataInAddr + n * k_numRegsPerBlock);
67 /// Calculate exponent
68 return BlockFloatCompander::expLzCnt(maxAbs, totShiftBits);
71 /// Compute exponent value for a set of 4 RB from the maximum absolute value
73 computeExponent_4RB(const BlockFloatCompander::ExpandedData& dataIn, const __m512i totShiftBits)
75 __m512i maxAbs = __m512i();
76 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
78 for (int n = 0; n < 4; ++n)
80 ((uint32_t*)&maxAbs)[n] = maxAbsOneBlock(dataInAddr + n * k_numRegsPerBlock);
82 /// Calculate exponent
83 return BlockFloatCompander::expLzCnt(maxAbs, totShiftBits);
86 /// Compute exponent value for 1 RB from the maximum absolute value
88 computeExponent_1RB(const BlockFloatCompander::ExpandedData& dataIn, const __m512i totShiftBits)
90 __m512i maxAbs = __m512i();
91 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
92 ((uint32_t*)&maxAbs)[0] = maxAbsOneBlock(dataInAddr);
93 /// Calculate exponent
94 const auto exps = BlockFloatCompander::expLzCnt(maxAbs, totShiftBits);
95 return ((uint8_t*)&exps)[0];
100 /// Apply compression to one compression block
101 template<BlockFloatCompander::PackFunction networkBytePack>
103 applyCompressionN_1RB(const __m512i* dataIn, uint8_t* outBlockAddr,
104 const int iqWidth, const uint8_t thisExp, const int totNumBytesPerReg, const uint16_t rbWriteMask)
106 /// Store exponent first
107 *outBlockAddr = thisExp;
108 #pragma unroll(k_numRegsPerBlock)
109 for (int n = 0; n < k_numRegsPerBlock; ++n)
111 /// Apply the exponent shift
112 const auto compData = _mm512_srai_epi16(dataIn[n], thisExp);
113 /// Pack compressed data network byte order
114 const auto compDataBytePacked = networkBytePack(compData);
115 /// Now have 1 register worth of bytes separated into 4 chunks (1 per lane)
116 /// Use four offset stores to join
117 const auto thisOutRegAddr = outBlockAddr + 1 + n * totNumBytesPerReg;
118 _mm_mask_storeu_epi8(thisOutRegAddr, rbWriteMask, _mm512_extracti64x2_epi64(compDataBytePacked, 0));
119 _mm_mask_storeu_epi8(thisOutRegAddr + iqWidth, rbWriteMask, _mm512_extracti64x2_epi64(compDataBytePacked, 1));
120 _mm_mask_storeu_epi8(thisOutRegAddr + (2 * iqWidth), rbWriteMask, _mm512_extracti64x2_epi64(compDataBytePacked, 2));
121 _mm_mask_storeu_epi8(thisOutRegAddr + (3 * iqWidth), rbWriteMask, _mm512_extracti64x2_epi64(compDataBytePacked, 3));
125 /// Derive and apply 9, 10, or 12bit compression to 16 compression blocks
126 template<BlockFloatCompander::PackFunction networkBytePack>
128 compressN_16RB(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut,
129 const __m512i totShiftBits, const int totNumBytesPerBlock, const int totNumBytesPerReg, const uint16_t rbWriteMask)
131 const auto exponents = computeExponent_16RB(dataIn, totShiftBits);
132 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
134 for (int n = 0; n < 16; ++n)
136 applyCompressionN_1RB<networkBytePack>(dataInAddr + n * k_numRegsPerBlock, dataOut->dataCompressed + n * totNumBytesPerBlock, dataIn.iqWidth, ((uint8_t*)&exponents)[n * 4], totNumBytesPerReg, rbWriteMask);
140 /// Derive and apply 9, 10, or 12bit compression to 4 compression blocks
141 template<BlockFloatCompander::PackFunction networkBytePack>
143 compressN_4RB(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut,
144 const __m512i totShiftBits, const int totNumBytesPerBlock, const int totNumBytesPerReg, const uint16_t rbWriteMask)
146 const auto exponents = computeExponent_4RB(dataIn, totShiftBits);
147 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
149 for (int n = 0; n < 4; ++n)
151 applyCompressionN_1RB<networkBytePack>(dataInAddr + n * k_numRegsPerBlock, dataOut->dataCompressed + n * totNumBytesPerBlock, dataIn.iqWidth, ((uint8_t*)&exponents)[n * 4], totNumBytesPerReg, rbWriteMask);
155 /// Derive and apply 9, 10, or 12bit compression to 1 RB
156 template<BlockFloatCompander::PackFunction networkBytePack>
158 compressN_1RB(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut,
159 const __m512i totShiftBits, const int totNumBytesPerBlock, const int totNumBytesPerReg, const uint16_t rbWriteMask)
161 const auto thisExponent = computeExponent_1RB(dataIn, totShiftBits);
162 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
163 applyCompressionN_1RB<networkBytePack>(dataInAddr, dataOut->dataCompressed, dataIn.iqWidth, thisExponent, totNumBytesPerReg, rbWriteMask);
166 /// Calls compression function specific to the number of blocks to be executed. For 9, 10, or 12bit iqWidth.
167 template<BlockFloatCompander::PackFunction networkBytePack>
169 compressByAllocN(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut,
170 const __m512i totShiftBits, const int totNumBytesPerBlock, const int totNumBytesPerReg, const uint16_t rbWriteMask)
172 switch (dataIn.numBlocks)
175 compressN_16RB<networkBytePack>(dataIn, dataOut, totShiftBits, totNumBytesPerBlock, totNumBytesPerReg, rbWriteMask);
179 compressN_4RB<networkBytePack>(dataIn, dataOut, totShiftBits, totNumBytesPerBlock, totNumBytesPerReg, rbWriteMask);
183 compressN_1RB<networkBytePack>(dataIn, dataOut, totShiftBits, totNumBytesPerBlock, totNumBytesPerReg, rbWriteMask);
190 /// Apply 8b compression to 1 compression block.
192 applyCompression8_1RB(const __m512i* dataIn, uint8_t* outBlockAddr, const uint8_t thisExp)
194 /// Store exponent first
195 *outBlockAddr = thisExp;
196 constexpr uint32_t k_writeMask = 0xFFFFFFFF;
197 __m256i* regOutAddr = reinterpret_cast<__m256i*>(outBlockAddr + 1);
198 #pragma unroll(k_numRegsPerBlock)
199 for (int n = 0; n < k_numRegsPerBlock; ++n)
201 /// Apply the exponent shift
202 const auto compData = _mm512_srai_epi16(dataIn[n], thisExp);
203 /// Truncate to 8bit and store
204 _mm256_mask_storeu_epi8(regOutAddr + n, k_writeMask, _mm512_cvtepi16_epi8(compData));
208 /// Derive and apply 8b compression to 16 compression blocks
210 compress8_16RB(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut, const __m512i totShiftBits)
212 const __m512i exponents = computeExponent_16RB(dataIn, totShiftBits);
213 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
215 for (int n = 0; n < 16; ++n)
217 applyCompression8_1RB(dataInAddr + n * k_numRegsPerBlock, dataOut->dataCompressed + n * (k_numDataElements + 1), ((uint8_t*)&exponents)[n * 4]);
221 /// Derive and apply 8b compression to 4 compression blocks
223 compress8_4RB(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut, const __m512i totShiftBits)
225 const __m512i exponents = computeExponent_4RB(dataIn, totShiftBits);
226 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
228 for (int n = 0; n < 4; ++n)
230 applyCompression8_1RB(dataInAddr + n * k_numRegsPerBlock, dataOut->dataCompressed + n * (k_numDataElements + 1), ((uint8_t*)&exponents)[n * 4]);
234 /// Derive and apply 8b compression to 1 compression block
236 compress8_1RB(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut, const __m512i totShiftBits)
238 const auto thisExponent = computeExponent_1RB(dataIn, totShiftBits);
239 const __m512i* dataInAddr = reinterpret_cast<const __m512i*>(dataIn.dataExpanded);
240 applyCompression8_1RB(dataInAddr, dataOut->dataCompressed, thisExponent);
243 /// Calls compression function specific to the number of RB to be executed. For 8 bit iqWidth.
245 compressByAlloc8(const BlockFloatCompander::ExpandedData& dataIn, BlockFloatCompander::CompressedData* dataOut, const __m512i totShiftBits)
247 switch (dataIn.numBlocks)
250 compress8_16RB(dataIn, dataOut, totShiftBits);
254 compress8_4RB(dataIn, dataOut, totShiftBits);
258 compress8_1RB(dataIn, dataOut, totShiftBits);
265 /// Expand 1 compression block
266 template<BlockFloatCompander::UnpackFunction networkByteUnpack>
268 applyExpansionN_1RB(const uint8_t* expAddr, __m512i* dataOutAddr, const int maxExpShift, const int totNumBytesPerReg)
270 static constexpr uint8_t k_WriteMask = 0xFF;
271 const auto thisExpShift = maxExpShift - *expAddr;
272 #pragma unroll(k_numRegsPerBlock)
273 for (int n = 0; n < k_numRegsPerBlock; ++n)
275 const auto thisInRegAddr = expAddr + 1 + n * totNumBytesPerReg;
276 /// Unpack network order packed data
277 const auto inDataUnpacked = networkByteUnpack(thisInRegAddr);
278 /// Apply exponent scaling (by appropriate arithmetic shift right)
279 const auto expandedData = _mm512_srai_epi16(inDataUnpacked, thisExpShift);
280 /// Write expanded data to output
281 _mm512_mask_storeu_epi64(dataOutAddr + n, k_WriteMask, expandedData);
285 /// Calls expansion function specific to the number of blocks to be executed. For 9, 10, or 12bit iqWidth.
286 template<BlockFloatCompander::UnpackFunction networkByteUnpack>
288 expandByAllocN(const BlockFloatCompander::CompressedData& dataIn, BlockFloatCompander::ExpandedData* dataOut,
289 const int totNumBytesPerBlock, const int totNumBytesPerReg, const int maxExpShift)
291 __m512i* dataOutAddr = reinterpret_cast<__m512i*>(dataOut->dataExpanded);
292 switch (dataIn.numBlocks)
296 for (int n = 0; n < 16; ++n)
298 applyExpansionN_1RB<networkByteUnpack>(dataIn.dataCompressed + n * totNumBytesPerBlock, dataOutAddr + n * k_numRegsPerBlock, maxExpShift, totNumBytesPerReg);
304 for (int n = 0; n < 4; ++n)
306 applyExpansionN_1RB<networkByteUnpack>(dataIn.dataCompressed + n * totNumBytesPerBlock, dataOutAddr + n * k_numRegsPerBlock, maxExpShift, totNumBytesPerReg);
311 applyExpansionN_1RB<networkByteUnpack>(dataIn.dataCompressed, dataOutAddr, maxExpShift, totNumBytesPerReg);
317 /// Apply expansion to 1 compression block
319 applyExpansion8_1RB(const uint8_t* expAddr, __m512i* dataOutAddr)
321 const __m256i* rawDataIn = reinterpret_cast<const __m256i*>(expAddr + 1);
322 static constexpr uint8_t k_WriteMask = 0xFF;
323 #pragma unroll(k_numRegsPerBlock)
324 for (int n = 0; n < k_numRegsPerBlock; ++n)
326 const auto compData16 = _mm512_cvtepi8_epi16(rawDataIn[n]);
327 const auto expData = _mm512_slli_epi16(compData16, *expAddr);
328 _mm512_mask_storeu_epi64(dataOutAddr + n, k_WriteMask, expData);
332 /// Calls expansion function specific to the number of RB to be executed. For 8 bit iqWidth.
334 expandByAlloc8(const BlockFloatCompander::CompressedData& dataIn, BlockFloatCompander::ExpandedData* dataOut)
336 __m512i* dataOutAddr = reinterpret_cast<__m512i*>(dataOut->dataExpanded);
337 switch (dataIn.numBlocks)
341 for (int n = 0; n < 16; ++n)
343 applyExpansion8_1RB(dataIn.dataCompressed + n * (k_numDataElements + 1), dataOutAddr + n * k_numRegsPerBlock);
349 for (int n = 0; n < 4; ++n)
351 applyExpansion8_1RB(dataIn.dataCompressed + n * (k_numDataElements + 1), dataOutAddr + n * k_numRegsPerBlock);
356 applyExpansion8_1RB(dataIn.dataCompressed, dataOutAddr);
363 /// Main kernel function for 32 antenna C-plane compression.
364 /// Starts by determining iqWidth specific parameters and functions.
366 BlockFloatCompander::BFPCompressCtrlPlane32Avx512(const ExpandedData& dataIn, CompressedData* dataOut)
368 /// Compensation for extra zeros in 32b leading zero count when computing exponent
369 const auto totShiftBits8 = _mm512_set1_epi32(25);
370 const auto totShiftBits9 = _mm512_set1_epi32(24);
371 const auto totShiftBits10 = _mm512_set1_epi32(23);
372 const auto totShiftBits12 = _mm512_set1_epi32(21);
374 /// Total number of data bytes per compression block is (iqWidth * numElements / 8) + 1
375 const auto totNumBytesPerBlock = ((BFP_CPlane_32::k_numDataElements * dataIn.iqWidth) >> 3) + 1;
376 /// Total number of compressed bytes to handle per register is 32 * iqWidth / 8
377 const auto totNumBytesPerReg = dataIn.iqWidth << 2;
379 /// Compressed data write mask for each iqWidth option
380 constexpr uint16_t rbWriteMask9 = 0x01FF;
381 constexpr uint16_t rbWriteMask10 = 0x03FF;
382 constexpr uint16_t rbWriteMask12 = 0x0FFF;
384 switch (dataIn.iqWidth)
387 BFP_CPlane_32::compressByAlloc8(dataIn, dataOut, totShiftBits8);
391 BFP_CPlane_32::compressByAllocN<BlockFloatCompander::networkBytePack9b>(dataIn, dataOut, totShiftBits9, totNumBytesPerBlock, totNumBytesPerReg, rbWriteMask9);
395 BFP_CPlane_32::compressByAllocN<BlockFloatCompander::networkBytePack10b>(dataIn, dataOut, totShiftBits10, totNumBytesPerBlock, totNumBytesPerReg, rbWriteMask10);
399 BFP_CPlane_32::compressByAllocN<BlockFloatCompander::networkBytePack12b>(dataIn, dataOut, totShiftBits12, totNumBytesPerBlock, totNumBytesPerReg, rbWriteMask12);
405 /// Main kernel function for 32 antenna C-plane expansion.
406 /// Starts by determining iqWidth specific parameters and functions.
408 BlockFloatCompander::BFPExpandCtrlPlane32Avx512(const CompressedData& dataIn, ExpandedData* dataOut)
410 constexpr int k_maxExpShift9 = 7;
411 constexpr int k_maxExpShift10 = 6;
412 constexpr int k_maxExpShift12 = 4;
414 /// Total number of data bytes per compression block is (iqWidth * numElements / 8) + 1
415 const auto totNumBytesPerBlock = ((BFP_CPlane_32::k_numDataElements * dataIn.iqWidth) >> 3) + 1;
416 /// Total number of compressed bytes to handle per register is 32 * iqWidth / 8
417 const auto totNumBytesPerReg = dataIn.iqWidth << 2;
419 switch (dataIn.iqWidth)
422 BFP_CPlane_32::expandByAlloc8(dataIn, dataOut);
426 BFP_CPlane_32::expandByAllocN<BlockFloatCompander::networkByteUnpack9b>(dataIn, dataOut, totNumBytesPerBlock, totNumBytesPerReg, k_maxExpShift9);
430 BFP_CPlane_32::expandByAllocN<BlockFloatCompander::networkByteUnpack10b>(dataIn, dataOut, totNumBytesPerBlock, totNumBytesPerReg, k_maxExpShift10);
434 BFP_CPlane_32::expandByAllocN<BlockFloatCompander::networkByteUnpack12b>(dataIn, dataOut, totNumBytesPerBlock, totNumBytesPerReg, k_maxExpShift12);