mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-26 05:34:13 +08:00
[CRYPTO] api: Add crypto_inc and crypto_xor
With the addition of more stream ciphers we need to curb the proliferation of ad-hoc xor functions. This patch creates a generic pair of functions, crypto_inc and crypto_xor, which perform big-endian increment and exclusive or, respectively. For optimum performance, they both use u32 operations, so the alignment of the arguments must be that of u32 even though they are of type u8 *. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
fcd0675593
commit
7613636def
@ -605,6 +605,53 @@ int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(crypto_tfm_in_queue);
|
||||
|
||||
/*
 * crypto_inc_byte - big-endian increment, one byte at a time.
 * @a: buffer to increment (treated as a big-endian integer)
 * @size: number of bytes in @a
 *
 * Walks backwards from the least significant byte, incrementing and
 * stopping as soon as a byte does not wrap to zero (no further carry).
 */
static inline void crypto_inc_byte(u8 *a, unsigned int size)
{
	u8 *p = a + size;

	while (size--) {
		--p;
		*p += 1;
		/* No wrap-around means the carry stopped here. */
		if (*p)
			break;
	}
}
|
||||
|
||||
/*
 * crypto_inc - increment a big-endian counter.
 * @a: counter buffer; must be aligned as u32 (see header comment)
 * @size: counter length in bytes
 *
 * Processes the buffer in 32-bit big-endian words from the least
 * significant end; falls back to a byte loop for any leading remainder
 * (fewer than four bytes, or when the carry propagates past all words).
 */
void crypto_inc(u8 *a, unsigned int size)
{
	__be32 *p = (__be32 *)(a + size);

	while (size >= 4) {
		u32 v = be32_to_cpu(*--p) + 1;

		*p = cpu_to_be32(v);
		size -= 4;
		/* Non-zero result: the carry has been absorbed, done. */
		if (v)
			return;
	}

	/* Handle the remaining most-significant bytes one at a time. */
	crypto_inc_byte(a, size);
}
EXPORT_SYMBOL_GPL(crypto_inc);
|
||||
|
||||
/*
 * crypto_xor_byte - exclusive-or @size bytes of @b into @a.
 * @a: destination buffer (modified in place)
 * @b: source buffer
 * @size: number of bytes to process
 */
static inline void crypto_xor_byte(u8 *a, const u8 *b, unsigned int size)
{
	while (size--)
		*a++ ^= *b++;
}
|
||||
|
||||
/*
 * crypto_xor - exclusive-or @size bytes of @src into @dst.
 * @dst: destination buffer (modified in place); must be aligned as u32
 * @src: source buffer; must be aligned as u32
 * @size: number of bytes to process
 *
 * XORs in 32-bit words for speed, then finishes any tail of fewer than
 * four bytes with the byte-wise helper.
 */
void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	u32 *d = (u32 *)dst;
	u32 *s = (u32 *)src;

	while (size >= 4) {
		*d++ ^= *s++;
		size -= 4;
	}

	crypto_xor_byte((u8 *)d, (u8 *)s, size);
}
EXPORT_SYMBOL_GPL(crypto_xor);
|
||||
|
||||
static int __init crypto_algapi_init(void)
|
||||
{
|
||||
crypto_init_proc();
|
||||
|
@ -124,6 +124,10 @@ int crypto_enqueue_request(struct crypto_queue *queue,
|
||||
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
|
||||
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
|
||||
|
||||
/* These functions require the input/output to be aligned as u32. */
|
||||
void crypto_inc(u8 *a, unsigned int size);
|
||||
void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
|
||||
|
||||
int blkcipher_walk_done(struct blkcipher_desc *desc,
|
||||
struct blkcipher_walk *walk, int err);
|
||||
int blkcipher_walk_virt(struct blkcipher_desc *desc,
|
||||
|
Loading…
Reference in New Issue
Block a user