s390/crc32le: convert to C

Convert CRC-32 LE variants to C.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Author: Heiko Carstens 2024-02-03 11:45:28 +01:00
parent c59bf4de01
commit 03325e9b64
3 changed files with 104 additions and 141 deletions

arch/s390/crypto/crc32-vx.c

@@ -31,10 +31,6 @@ struct crc_desc_ctx {
 	u32 crc;
 };
 
-/* Prototypes for functions in assembly files */
-u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
-u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
-
 /*
  * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
  *
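Note: the removed prototypes move into crc32-vx.h (next file), since after this commit both this glue code and the new C implementation in crc32le-vx.c need them. For context, the DEFINE_CRC32_VX() macro named in the trailing context wraps a vector routine in a kernel FPU section; roughly along the following lines. This is a simplified sketch assuming the kernel_fpu_begin()/kernel_fpu_end() API of this FPU rework series, not the literal macro, which additionally pre-aligns the start of the buffer:

	/*
	 * Simplified sketch of the DEFINE_CRC32_VX() pattern: run the
	 * vector routine on the 16-byte-multiple bulk of the buffer inside
	 * a kernel FPU section, and let a software fallback handle short
	 * buffers and the tail.
	 */
	#define DEFINE_CRC32_VX(___fname, ___crc32_vx, ___crc32_sw)		\
	static u32 ___fname(u32 crc, unsigned char const *data, size_t datalen) \
	{									\
		unsigned long aligned, remaining;				\
		DECLARE_KERNEL_FPU_ONSTACK16(vxstate);				\
										\
		if (datalen < 64)						\
			return ___crc32_sw(crc, data, datalen);			\
		aligned = datalen & ~15UL;					\
		remaining = datalen & 15UL;					\
		kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);			\
		crc = ___crc32_vx(crc, data, aligned);				\
		kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);			\
		if (remaining)							\
			crc = ___crc32_sw(crc, data + aligned, remaining);	\
		return crc;							\
	}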

arch/s390/crypto/crc32-vx.h

@@ -6,5 +6,7 @@
 #include <linux/types.h>
 
 u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
 
 #endif /* _CRC32_VX_S390_H */

arch/s390/crypto/crc32le-vx.S → arch/s390/crypto/crc32le-vx.c

@@ -13,20 +13,17 @@
  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
 
-#include <linux/linkage.h>
-#include <asm/nospec-insn.h>
-#include <asm/fpu-insn.h>
+#include <linux/types.h>
+#include <asm/fpu.h>
+#include "crc32-vx.h"
 
 /* Vector register range containing CRC-32 constants */
-#define CONST_PERM_LE2BE	%v9
-#define CONST_R2R1		%v10
-#define CONST_R4R3		%v11
-#define CONST_R5		%v12
-#define CONST_RU_POLY		%v13
-#define CONST_CRC_POLY		%v14
-
-.data
-.balign	8
+#define CONST_PERM_LE2BE	9
+#define CONST_R2R1		10
+#define CONST_R4R3		11
+#define CONST_R5		12
+#define CONST_RU_POLY		13
+#define CONST_CRC_POLY		14
 
 /*
  * The CRC-32 constant block contains reduction constants to fold and
@@ -59,64 +56,43 @@
  * P'(x) = 0x82F63B78
  */
 
-SYM_DATA_START_LOCAL(constants_CRC_32_LE)
-	.octa	0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
-	.quad	0x1c6e41596, 0x154442bd4		# R2, R1
-	.quad	0x0ccaa009e, 0x1751997d0		# R4, R3
-	.octa	0x163cd6124				# R5
-	.octa	0x1F7011641				# u'
-	.octa	0x1DB710641				# P'(x) << 1
-SYM_DATA_END(constants_CRC_32_LE)
+static unsigned long constants_CRC_32_LE[] = {
+	0x0f0e0d0c0b0a0908, 0x0706050403020100,	/* BE->LE mask */
+	0x1c6e41596, 0x154442bd4,		/* R2, R1 */
+	0x0ccaa009e, 0x1751997d0,		/* R4, R3 */
+	0x0, 0x163cd6124,			/* R5 */
+	0x0, 0x1f7011641,			/* u' */
+	0x0, 0x1db710641			/* P'(x) << 1 */
+};
 
-SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
-	.octa	0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
-	.quad	0x09e4addf8, 0x740eef02			# R2, R1
-	.quad	0x14cd00bd6, 0xf20c0dfe			# R4, R3
-	.octa	0x0dd45aab8				# R5
-	.octa	0x0dea713f1				# u'
-	.octa	0x105ec76f0				# P'(x) << 1
-SYM_DATA_END(constants_CRC_32C_LE)
+static unsigned long constants_CRC_32C_LE[] = {
+	0x0f0e0d0c0b0a0908, 0x0706050403020100,	/* BE->LE mask */
+	0x09e4addf8, 0x740eef02,		/* R2, R1 */
+	0x14cd00bd6, 0xf20c0dfe,		/* R4, R3 */
+	0x0, 0x0dd45aab8,			/* R5 */
+	0x0, 0x0dea713f1,			/* u' */
+	0x0, 0x105ec76f0			/* P'(x) << 1 */
+};
 
-	.previous
-
-	GEN_BR_THUNK %r14
-
-	.text
-
-/*
- * The CRC-32 functions use these calling conventions:
- *
- * Parameters:
- *
- * %r2:	Initial CRC value, typically ~0; and final CRC (return) value.
- * %r3:	Input buffer pointer, performance might be improved if the
- *	buffer is on a doubleword boundary.
- * %r4: Length of the buffer, must be 64 bytes or greater.
- *
- * Register usage:
- *
- * %r5: CRC-32 constant pool base pointer.
- * V0:	Initial CRC value and intermediate constants and results.
- * V1..V4: Data for CRC computation.
- * V5..V8: Next data chunks that are fetched from the input buffer.
- * V9:	Constant for BE->LE conversion and shift operations
- *
- * V10..V14: CRC-32 constants.
- */
-
-SYM_FUNC_START(crc32_le_vgfm_16)
-	larl	%r5,constants_CRC_32_LE
-	j	crc32_le_vgfm_generic
-SYM_FUNC_END(crc32_le_vgfm_16)
-
-SYM_FUNC_START(crc32c_le_vgfm_16)
-	larl	%r5,constants_CRC_32C_LE
-	j	crc32_le_vgfm_generic
-SYM_FUNC_END(crc32c_le_vgfm_16)
-
-SYM_FUNC_START(crc32_le_vgfm_generic)
+/**
+ * crc32_le_vgfm_generic - Compute CRC-32 (LE variant) with vector registers
+ * @crc: Initial CRC value, typically ~0.
+ * @buf: Input buffer pointer, performance might be improved if the
+ *	 buffer is on a doubleword boundary.
+ * @size: Size of the buffer, must be 64 bytes or greater.
+ * @constants: CRC-32 constant pool base pointer.
+ *
+ * Register usage:
+ *	V0:	Initial CRC value and intermediate constants and results.
+ *	V1..V4:	Data for CRC computation.
+ *	V5..V8:	Next data chunks that are fetched from the input buffer.
+ *	V9:	Constant for BE->LE conversion and shift operations
+ *	V10..V14: CRC-32 constants.
+ */
+static u32 crc32_le_vgfm_generic(u32 crc, unsigned char const *buf, size_t size, unsigned long *constants)
+{
 	/* Load CRC-32 constants */
-	VLM	CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
+	fpu_vlm(CONST_PERM_LE2BE, CONST_CRC_POLY, constants);
 
 	/*
 	 * Load the initial CRC value.
@@ -125,90 +101,73 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
 	 * vector register and is later XORed with the LSB portion
 	 * of the loaded input data.
 	 */
-	VZERO	%v0			/* Clear V0 */
-	VLVGF	%v0,%r2,3		/* Load CRC into rightmost word */
+	fpu_vzero(0);			/* Clear V0 */
+	fpu_vlvgf(0, crc, 3);		/* Load CRC into rightmost word */
 
 	/* Load a 64-byte data chunk and XOR with CRC */
-	VLM	%v1,%v4,0,%r3		/* 64-bytes into V1..V4 */
-	VPERM	%v1,%v1,%v1,CONST_PERM_LE2BE
-	VPERM	%v2,%v2,%v2,CONST_PERM_LE2BE
-	VPERM	%v3,%v3,%v3,CONST_PERM_LE2BE
-	VPERM	%v4,%v4,%v4,CONST_PERM_LE2BE
-
-	VX	%v1,%v0,%v1		/* V1 ^= CRC */
-	aghi	%r3,64			/* BUF = BUF + 64 */
-	aghi	%r4,-64			/* LEN = LEN - 64 */
-
-	cghi	%r4,64
-	jl	.Lless_than_64bytes
-
-.Lfold_64bytes_loop:
-	/* Load the next 64-byte data chunk into V5 to V8 */
-	VLM	%v5,%v8,0,%r3
-	VPERM	%v5,%v5,%v5,CONST_PERM_LE2BE
-	VPERM	%v6,%v6,%v6,CONST_PERM_LE2BE
-	VPERM	%v7,%v7,%v7,CONST_PERM_LE2BE
-	VPERM	%v8,%v8,%v8,CONST_PERM_LE2BE
-
-	/*
-	 * Perform a GF(2) multiplication of the doublewords in V1 with
-	 * the R1 and R2 reduction constants in V0.  The intermediate result
-	 * is then folded (accumulated) with the next data chunk in V5 and
-	 * stored in V1. Repeat this step for the register contents
-	 * in V2, V3, and V4 respectively.
-	 */
-	VGFMAG	%v1,CONST_R2R1,%v1,%v5
-	VGFMAG	%v2,CONST_R2R1,%v2,%v6
-	VGFMAG	%v3,CONST_R2R1,%v3,%v7
-	VGFMAG	%v4,CONST_R2R1,%v4,%v8
-
-	aghi	%r3,64			/* BUF = BUF + 64 */
-	aghi	%r4,-64			/* LEN = LEN - 64 */
-
-	cghi	%r4,64
-	jnl	.Lfold_64bytes_loop
-
-.Lless_than_64bytes:
+	fpu_vlm(1, 4, buf);
+	fpu_vperm(1, 1, 1, CONST_PERM_LE2BE);
+	fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
+	fpu_vperm(3, 3, 3, CONST_PERM_LE2BE);
+	fpu_vperm(4, 4, 4, CONST_PERM_LE2BE);
+	fpu_vx(1, 0, 1);		/* V1 ^= CRC */
+	buf += 64;
+	size -= 64;
+
+	while (size >= 64) {
+		fpu_vlm(5, 8, buf);
+		fpu_vperm(5, 5, 5, CONST_PERM_LE2BE);
+		fpu_vperm(6, 6, 6, CONST_PERM_LE2BE);
+		fpu_vperm(7, 7, 7, CONST_PERM_LE2BE);
+		fpu_vperm(8, 8, 8, CONST_PERM_LE2BE);
+		/*
+		 * Perform a GF(2) multiplication of the doublewords in V1 with
+		 * the R1 and R2 reduction constants in V0. The intermediate
+		 * result is then folded (accumulated) with the next data chunk
+		 * in V5 and stored in V1. Repeat this step for the register
+		 * contents in V2, V3, and V4 respectively.
+		 */
+		fpu_vgfmag(1, CONST_R2R1, 1, 5);
+		fpu_vgfmag(2, CONST_R2R1, 2, 6);
+		fpu_vgfmag(3, CONST_R2R1, 3, 7);
+		fpu_vgfmag(4, CONST_R2R1, 4, 8);
+		buf += 64;
+		size -= 64;
+	}
+
 	/*
 	 * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3
 	 * and R4 and accumulating the next 128-bit chunk until a single 128-bit
 	 * value remains.
 	 */
-	VGFMAG	%v1,CONST_R4R3,%v1,%v2
-	VGFMAG	%v1,CONST_R4R3,%v1,%v3
-	VGFMAG	%v1,CONST_R4R3,%v1,%v4
-
-	cghi	%r4,16
-	jl	.Lfinal_fold
-
-.Lfold_16bytes_loop:
-
-	VL	%v2,0,,%r3		/* Load next data chunk */
-	VPERM	%v2,%v2,%v2,CONST_PERM_LE2BE
-	VGFMAG	%v1,CONST_R4R3,%v1,%v2	/* Fold next data chunk */
-
-	aghi	%r3,16
-	aghi	%r4,-16
-	cghi	%r4,16
-	jnl	.Lfold_16bytes_loop
-
-.Lfinal_fold:
+	fpu_vgfmag(1, CONST_R4R3, 1, 2);
+	fpu_vgfmag(1, CONST_R4R3, 1, 3);
+	fpu_vgfmag(1, CONST_R4R3, 1, 4);
+
+	while (size >= 16) {
+		fpu_vl(2, buf);
+		fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
+		fpu_vgfmag(1, CONST_R4R3, 1, 2);
+		buf += 16;
+		size -= 16;
+	}
+
 	/*
 	 * Set up a vector register for byte shifts. The shift value must
 	 * be loaded in bits 1-4 in byte element 7 of a vector register.
 	 * Shift by 8 bytes: 0x40
 	 * Shift by 4 bytes: 0x20
 	 */
-	VLEIB	%v9,0x40,7
+	fpu_vleib(9, 0x40, 7);
 
 	/*
 	 * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
 	 * to move R4 into the rightmost doubleword and set the leftmost
 	 * doubleword to 0x1.
 	 */
-	VSRLB	%v0,CONST_R4R3,%v9
-	VLEIG	%v0,1,0
+	fpu_vsrlb(0, CONST_R4R3, 9);
+	fpu_vleig(0, 1, 0);
 
 	/*
 	 * Compute GF(2) product of V1 and V0. The rightmost doubleword
@@ -216,7 +175,7 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
 	 * multiplied by 0x1 and is then XORed with rightmost product.
 	 * Implicitly, the intermediate leftmost product becomes padded
 	 */
-	VGFMG	%v1,%v0,%v1
+	fpu_vgfmg(1, 0, 1);
 
 	/*
 	 * Now do the final 32-bit fold by multiplying the rightmost word
@@ -231,10 +190,10 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
 	 * rightmost doubleword and the leftmost doubleword is zero to ignore
 	 * the leftmost product of V1.
 	 */
-	VLEIB	%v9,0x20,7		/* Shift by words */
-	VSRLB	%v2,%v1,%v9		/* Store remaining bits in V2 */
-	VUPLLF	%v1,%v1			/* Split rightmost doubleword */
-	VGFMAG	%v1,CONST_R5,%v1,%v2	/* V1 = (V1 * R5) XOR V2 */
+	fpu_vleib(9, 0x20, 7);		/* Shift by words */
+	fpu_vsrlb(2, 1, 9);		/* Store remaining bits in V2 */
+	fpu_vupllf(1, 1);		/* Split rightmost doubleword */
+	fpu_vgfmag(1, CONST_R5, 1, 2);	/* V1 = (V1 * R5) XOR V2 */
 
 	/*
 	 * Apply a Barret reduction to compute the final 32-bit CRC value.
@@ -256,20 +215,26 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
 	 */
 
 	/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
-	VUPLLF	%v2,%v1
-	VGFMG	%v2,CONST_RU_POLY,%v2
+	fpu_vupllf(2, 1);
+	fpu_vgfmg(2, CONST_RU_POLY, 2);
 
 	/*
 	 * Compute the GF(2) product of the CRC polynomial with T1(x) in
 	 * V2 and XOR the intermediate result, T2(x), with the value in V1.
 	 * The final result is stored in word element 2 of V2.
 	 */
-	VUPLLF	%v2,%v2
-	VGFMAG	%v2,CONST_CRC_POLY,%v2,%v1
+	fpu_vupllf(2, 2);
+	fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
 
-.Ldone:
-	VLGVF	%r2,%v2,2
-	BR_EX	%r14
-SYM_FUNC_END(crc32_le_vgfm_generic)
+	return fpu_vlgvf(2, 2);
+}
 
-	.previous
+u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+	return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32_LE[0]);
+}
+
+u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
+{
+	return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32C_LE[0]);
+}
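
Closing note: a bit-at-a-time reference pins down what these routines compute. A minimal sketch (a hypothetical helper for illustration, not part of the patch); poly is the bitreflected polynomial P'(x), i.e. 0xedb88320 for CRC-32 and 0x82f63b78 for CRC-32C, the latter as given in the constant-block comment above:

	#include <stddef.h>
	#include <stdint.h>

	/* LSB-first (bitreflected) CRC-32, one bit at a time. */
	static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *buf,
				     size_t size, uint32_t poly)
	{
		while (size--) {
			crc ^= *buf++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? poly : 0);
		}
		return crc;
	}

For any size that is a multiple of 16 and at least 64, crc32_le_vgfm_16(crc, buf, size) has to agree with crc32_le_ref(crc, buf, size, 0xedb88320): the vector code consumes the buffer in 64-byte and then 16-byte chunks, and any shorter tail is left to the software fallback in the DEFINE_CRC32_VX() glue.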