Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-11 12:28:41 +08:00
Commit 8a1955f958:

Currently the ecb/cbc macros hold fpu context unnecessarily when using
scalar cipher routines (e.g. when handling odd sizes of blocks per
walk). Change the macros to drop fpu context as soon as the fpu is out
of use. No performance impact found (on Intel Haswell).

Signed-off-by: Peter Lafreniere <peter@n8pjl.ca>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
88 lines · 2.5 KiB · C
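The early-exit behaviour described in the commit message lives in the ECB_BLOCK() and CBC_DEC_BLOCK() macros in the file below. As a condensed sketch (simplified from those macros, not verbatim kernel code), the guard looks like this:

        /* Only the scalar (non-SIMD) routine remains for this walk step:
         * release FPU context now instead of holding it across the scalar
         * loop. Clearing do_fpu keeps ECB_WALK_END() from calling
         * kernel_fpu_end() a second time. */
        if (do_fpu && __blocks < __fpu_blocks) {
                kernel_fpu_end();
                do_fpu = false;
        }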
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _CRYPTO_ECB_CBC_HELPER_H
#define _CRYPTO_ECB_CBC_HELPER_H

#include <crypto/internal/skcipher.h>
#include <asm/fpu/api.h>

/*
 * Mode helpers to instantiate parameterized skcipher ECB/CBC modes without
 * having to rely on indirect calls and retpolines.
 */

#define ECB_WALK_START(req, bsize, fpu_blocks) do {                    \
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));  \
        const int __fpu_blocks = (fpu_blocks);                         \
        const int __bsize = (bsize);                                   \
        struct skcipher_walk walk;                                     \
        int err = skcipher_walk_virt(&walk, (req), false);             \
        while (walk.nbytes > 0) {                                      \
                unsigned int nbytes = walk.nbytes;                     \
                bool do_fpu = __fpu_blocks != -1 &&                    \
                              nbytes >= __fpu_blocks * __bsize;        \
                const u8 *src = walk.src.virt.addr;                    \
                u8 *dst = walk.dst.virt.addr;                          \
                u8 __maybe_unused buf[(bsize)];                        \
                if (do_fpu) kernel_fpu_begin()

#define CBC_WALK_START(req, bsize, fpu_blocks)                         \
        ECB_WALK_START(req, bsize, fpu_blocks)

#define ECB_WALK_ADVANCE(blocks) do {                                  \
        dst += (blocks) * __bsize;                                     \
        src += (blocks) * __bsize;                                     \
        nbytes -= (blocks) * __bsize;                                  \
} while (0)

#define ECB_BLOCK(blocks, func) do {                                   \
        const int __blocks = (blocks);                                 \
        if (do_fpu && __blocks < __fpu_blocks) {                       \
                kernel_fpu_end();                                      \
                do_fpu = false;                                        \
        }                                                              \
        while (nbytes >= __blocks * __bsize) {                         \
                (func)(ctx, dst, src);                                 \
                ECB_WALK_ADVANCE(blocks);                              \
        }                                                              \
} while (0)

#define CBC_ENC_BLOCK(func) do {                                       \
        const u8 *__iv = walk.iv;                                      \
        while (nbytes >= __bsize) {                                    \
                crypto_xor_cpy(dst, src, __iv, __bsize);               \
                (func)(ctx, dst, dst);                                 \
                __iv = dst;                                            \
                ECB_WALK_ADVANCE(1);                                   \
        }                                                              \
        memcpy(walk.iv, __iv, __bsize);                                \
} while (0)

#define CBC_DEC_BLOCK(blocks, func) do {                               \
        const int __blocks = (blocks);                                 \
        if (do_fpu && __blocks < __fpu_blocks) {                       \
                kernel_fpu_end();                                      \
                do_fpu = false;                                        \
        }                                                              \
        while (nbytes >= __blocks * __bsize) {                         \
                const u8 *__iv = src + ((blocks) - 1) * __bsize;       \
                if (dst == src)                                        \
                        __iv = memcpy(buf, __iv, __bsize);             \
                (func)(ctx, dst, src);                                 \
                crypto_xor(dst, walk.iv, __bsize);                     \
                memcpy(walk.iv, __iv, __bsize);                        \
                ECB_WALK_ADVANCE(blocks);                              \
        }                                                              \
} while (0)

#define ECB_WALK_END()                                                 \
        if (do_fpu) kernel_fpu_end();                                  \
        err = skcipher_walk_done(&walk, nbytes);                       \
        }                                                              \
        return err;                                                    \
} while (0)

#define CBC_WALK_END() ECB_WALK_END()

#endif
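For context on how these helpers are meant to be used, here is a sketch of a glue module instantiating them. It is modeled on the serpent-avx2 glue code in the same tree (arch/x86/crypto/serpent_avx2_glue.c); the serpent function names and block constants are recalled from that driver rather than defined in this header, so treat them as illustrative:

static int ecb_encrypt(struct skcipher_request *req)
{
        /* Take FPU context only when at least SERPENT_PARALLEL_BLOCKS
         * blocks are available in the current walk step. */
        ECB_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
        ECB_BLOCK(16, serpent_ecb_enc_16way);      /* AVX2, 16 blocks */
        ECB_BLOCK(8, serpent_ecb_enc_8way_avx);    /* AVX, 8 blocks */
        ECB_BLOCK(1, __serpent_encrypt);           /* scalar fallback */
        ECB_WALK_END();
}

static int cbc_decrypt(struct skcipher_request *req)
{
        CBC_WALK_START(req, SERPENT_BLOCK_SIZE, SERPENT_PARALLEL_BLOCKS);
        CBC_DEC_BLOCK(16, serpent_cbc_dec_16way);
        CBC_DEC_BLOCK(8, serpent_cbc_dec_8way_avx);
        CBC_DEC_BLOCK(1, __serpent_decrypt);       /* scalar fallback */
        CBC_WALK_END();
}

Each ECB_BLOCK()/CBC_DEC_BLOCK() call consumes all remaining whole runs of its block count before falling through to the next, smaller one; once only runs smaller than fpu_blocks remain, the macros release FPU context before the scalar routine runs. Passing -1 as fpu_blocks skips kernel_fpu_begin()/kernel_fpu_end() entirely, as a purely scalar cipher would want.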