// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024, Vladimir Oltean <olteanv@gmail.com>
* Copyright (c) 2024, Intel Corporation.
*/
#include <kunit/test.h>
#include <linux/packing.h>
/* A single pack()/unpack() test vector: the logical value, the bit range it
 * occupies, the quirks in effect, and the expected physical buffer contents.
 */
struct packing_test_case {
	const char *desc;	/* human-readable name, used as the KUnit parameter description */
	const u8 *pbuf;		/* expected packed (physical) buffer contents */
	size_t pbuf_size;	/* size of @pbuf in bytes */
	u64 uval;		/* logical (unpacked) value */
	size_t start_bit;	/* most significant logical bit of the packed field */
	size_t end_bit;		/* least significant logical bit of the packed field */
	u8 quirks;		/* bitwise OR of QUIRK_* flags, or NO_QUIRKS */
};

/* Explicit marker for test cases that exercise the default behavior. */
#define NO_QUIRKS 0

/**
 * PBUF - Initialize .pbuf and .pbuf_size
 * @array: elements of constant physical buffer
 *
 * Initializes the .pbuf and .pbuf_size fields of a struct packing_test_case
 * with a constant array of the specified elements.
 */
#define PBUF(array...) \
	.pbuf = (const u8[]){ array }, \
	.pbuf_size = sizeof((const u8 []){ array })
/* Table of pack()/unpack() test vectors, one entry per quirk/layout
 * combination. Each entry is used both to verify pack() output against
 * .pbuf and to verify unpack() output against .uval.
 */
static const struct packing_test_case cases[] = {
	/* These tests pack and unpack a magic 64-bit value
	 * (0xcafedeadbeefcafe) at a fixed logical offset (32) within an
	 * otherwise zero array of 128 bits (16 bytes). They test all possible
	 * bit layouts of the 128 bit buffer.
	 */
	{
		.desc = "no quirks, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad,
		     0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "lsw32 first, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xbe, 0xef, 0xca, 0xfe,
		     0xca, 0xfe, 0xde, 0xad, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST,
	},
	{
		.desc = "little endian words, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xad, 0xde, 0xfe, 0xca,
		     0xfe, 0xca, 0xef, 0xbe, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
		     0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "msb right, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x53, 0x7f, 0x7b, 0xb5,
		     0x7d, 0xf7, 0x53, 0x7f, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_MSB_ON_THE_RIGHT,
	},
	{
		.desc = "msb right + lsw32 first, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x7d, 0xf7, 0x53, 0x7f,
		     0x53, 0x7f, 0x7b, 0xb5, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST,
	},
	{
		.desc = "msb right + little endian words, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xb5, 0x7b, 0x7f, 0x53,
		     0x7f, 0x53, 0xf7, 0x7d, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "msb right + lsw32 first + little endian words, 16 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x7f, 0x53, 0xf7, 0x7d,
		     0xb5, 0x7b, 0x7f, 0x53, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	/* These tests pack and unpack a magic 64-bit value
	 * (0xcafedeadbeefcafe) at a fixed logical offset (32) within an
	 * otherwise zero array of varying size from 18 bytes to 24 bytes.
	 */
	{
		.desc = "no quirks, 18 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xfe,
		     0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00,
		     0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "no quirks, 19 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca,
		     0xfe, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe, 0x00,
		     0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "no quirks, 20 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		     0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe,
		     0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "no quirks, 22 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0xca, 0xfe, 0xde, 0xad, 0xbe, 0xef,
		     0xca, 0xfe, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "no quirks, 24 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xde, 0xad,
		     0xbe, 0xef, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "lsw32 first + little endian words, 18 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
		     0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 19 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
		     0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 20 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
		     0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 22 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
		     0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 24 bytes",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xfe, 0xca, 0xef, 0xbe,
		     0xad, 0xde, 0xfe, 0xca, 0x00, 0x00, 0x00, 0x00,
		     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xcafedeadbeefcafe,
		.start_bit = 95,
		.end_bit = 32,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	/* These tests pack and unpack a magic 64-bit value
	 * (0x1122334455667788) at an odd starting bit (43) within an
	 * otherwise zero array of 128 bits (16 bytes). They test all possible
	 * bit layouts of the 128 bit buffer.
	 */
	{
		.desc = "no quirks, 16 bytes, non-aligned",
		PBUF(0x00, 0x00, 0x00, 0x89, 0x11, 0x9a, 0x22, 0xab,
		     0x33, 0xbc, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "lsw32 first, 16 bytes, non-aligned",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x33, 0xbc, 0x40, 0x00,
		     0x11, 0x9a, 0x22, 0xab, 0x00, 0x00, 0x00, 0x89),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_LSW32_IS_FIRST,
	},
	{
		.desc = "little endian words, 16 bytes, non-aligned",
		PBUF(0x89, 0x00, 0x00, 0x00, 0xab, 0x22, 0x9a, 0x11,
		     0x00, 0x40, 0xbc, 0x33, 0x00, 0x00, 0x00, 0x00),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 16 bytes, non-aligned",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0xbc, 0x33,
		     0xab, 0x22, 0x9a, 0x11, 0x89, 0x00, 0x00, 0x00),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "msb right, 16 bytes, non-aligned",
		PBUF(0x00, 0x00, 0x00, 0x91, 0x88, 0x59, 0x44, 0xd5,
		     0xcc, 0x3d, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT,
	},
	{
		.desc = "msb right + lsw32 first, 16 bytes, non-aligned",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xcc, 0x3d, 0x02, 0x00,
		     0x88, 0x59, 0x44, 0xd5, 0x00, 0x00, 0x00, 0x91),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST,
	},
	{
		.desc = "msb right + little endian words, 16 bytes, non-aligned",
		PBUF(0x91, 0x00, 0x00, 0x00, 0xd5, 0x44, 0x59, 0x88,
		     0x00, 0x02, 0x3d, 0xcc, 0x00, 0x00, 0x00, 0x00),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "msb right + lsw32 first + little endian words, 16 bytes, non-aligned",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x3d, 0xcc,
		     0xd5, 0x44, 0x59, 0x88, 0x91, 0x00, 0x00, 0x00),
		.uval = 0x1122334455667788,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	/* These tests pack and unpack a u64 with all bits set
	 * (0xffffffffffffffff) at an odd starting bit (43) within an
	 * otherwise zero array of 128 bits (16 bytes). They test all possible
	 * bit layouts of the 128 bit buffer.
	 */
	{
		.desc = "no quirks, 16 bytes, non-aligned, 0xff",
		PBUF(0x00, 0x00, 0x07, 0xff, 0xff, 0xff, 0xff, 0xff,
		     0xff, 0xff, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = NO_QUIRKS,
	},
	{
		.desc = "lsw32 first, 16 bytes, non-aligned, 0xff",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x00,
		     0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x07, 0xff),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_LSW32_IS_FIRST,
	},
	{
		.desc = "little endian words, 16 bytes, non-aligned, 0xff",
		PBUF(0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
		     0x00, 0xf8, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "lsw32 first + little endian words, 16 bytes, non-aligned, 0xff",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff,
		     0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "msb right, 16 bytes, non-aligned, 0xff",
		PBUF(0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff,
		     0xff, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT,
	},
	{
		.desc = "msb right + lsw32 first, 16 bytes, non-aligned, 0xff",
		PBUF(0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x00,
		     0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xe0, 0xff),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST,
	},
	{
		.desc = "msb right + little endian words, 16 bytes, non-aligned, 0xff",
		PBUF(0xff, 0xe0, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
		     0x00, 0x1f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LITTLE_ENDIAN,
	},
	{
		.desc = "msb right + lsw32 first + little endian words, 16 bytes, non-aligned, 0xff",
		PBUF(0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xff, 0xff,
		     0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x00, 0x00),
		.uval = 0xffffffffffffffff,
		.start_bit = 106,
		.end_bit = 43,
		.quirks = QUIRK_MSB_ON_THE_RIGHT | QUIRK_LSW32_IS_FIRST | QUIRK_LITTLE_ENDIAN,
	},
};

/* Generate one KUnit parameter per entry in cases[], named by .desc. */
KUNIT_ARRAY_PARAM_DESC(packing, cases, desc);
/* Pack .uval into a zeroed buffer and verify the physical byte layout
 * matches the expected .pbuf contents for this parameterized case.
 */
static void packing_test_pack(struct kunit *test)
{
	const struct packing_test_case *tc = test->param_value;
	u8 *packed;
	int rc;

	/* Zero-filled destination buffer; KUnit frees it automatically. */
	packed = kunit_kzalloc(test, tc->pbuf_size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, packed);

	rc = pack(packed, tc->uval, tc->start_bit, tc->end_bit,
		  tc->pbuf_size, tc->quirks);
	KUNIT_EXPECT_EQ_MSG(test, rc, 0, "pack() returned %pe\n", ERR_PTR(rc));

	/* The packed bytes must match the expected physical layout exactly. */
	KUNIT_EXPECT_MEMEQ(test, packed, tc->pbuf, tc->pbuf_size);
}
/* Unpack the field from the expected physical buffer and verify the
 * recovered logical value equals .uval for this parameterized case.
 */
static void packing_test_unpack(struct kunit *test)
{
	const struct packing_test_case *tc = test->param_value;
	u64 unpacked;
	int rc;

	rc = unpack(tc->pbuf, &unpacked, tc->start_bit, tc->end_bit,
		    tc->pbuf_size, tc->quirks);
	KUNIT_EXPECT_EQ_MSG(test, rc, 0, "unpack() returned %pe\n", ERR_PTR(rc));

	/* Round-trip: the logical value must be recovered bit-for-bit. */
	KUNIT_EXPECT_EQ(test, unpacked, tc->uval);
}
/* Run both the pack and unpack tests once per entry in cases[]. */
static struct kunit_case packing_test_cases[] = {
	KUNIT_CASE_PARAM(packing_test_pack, packing_gen_params),
	KUNIT_CASE_PARAM(packing_test_unpack, packing_gen_params),
	{},	/* sentinel terminating the case list */
};
/* KUnit suite definition; registered below via kunit_test_suite(). */
static struct kunit_suite packing_test_suite = {
	.name = "packing",
	.test_cases = packing_test_cases,
};

kunit_test_suite(packing_test_suite);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("KUnit tests for packing library");