crypto: blowfish - use unaligned accessors instead of alignmask
Instead of using an alignmask of 0x3 to ensure 32-bit alignment of the Blowfish input and output blocks, which propagates to mode drivers, and results in pointless copying on architectures that don't care about alignment, use the unaligned accessors, which will do the right thing on each respective architecture, avoiding the need for double buffering.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
81d091a293
commit
50a3a9fae3
@@ -14,7 +14,7 @@
|
|||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
#include <asm/byteorder.h>
|
#include <asm/unaligned.h>
|
||||||
#include <linux/crypto.h>
|
#include <linux/crypto.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <crypto/blowfish.h>
|
#include <crypto/blowfish.h>
|
||||||
@@ -36,12 +36,10 @@
|
|||||||
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||||
{
|
{
|
||||||
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
|
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||||
const __be32 *in_blk = (const __be32 *)src;
|
|
||||||
__be32 *const out_blk = (__be32 *)dst;
|
|
||||||
const u32 *P = ctx->p;
|
const u32 *P = ctx->p;
|
||||||
const u32 *S = ctx->s;
|
const u32 *S = ctx->s;
|
||||||
u32 yl = be32_to_cpu(in_blk[0]);
|
u32 yl = get_unaligned_be32(src);
|
||||||
u32 yr = be32_to_cpu(in_blk[1]);
|
u32 yr = get_unaligned_be32(src + 4);
|
||||||
|
|
||||||
ROUND(yr, yl, 0);
|
ROUND(yr, yl, 0);
|
||||||
ROUND(yl, yr, 1);
|
ROUND(yl, yr, 1);
|
||||||
@@ -63,19 +61,17 @@ static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
|||||||
yl ^= P[16];
|
yl ^= P[16];
|
||||||
yr ^= P[17];
|
yr ^= P[17];
|
||||||
|
|
||||||
out_blk[0] = cpu_to_be32(yr);
|
put_unaligned_be32(yr, dst);
|
||||||
out_blk[1] = cpu_to_be32(yl);
|
put_unaligned_be32(yl, dst + 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
||||||
{
|
{
|
||||||
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
|
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||||
const __be32 *in_blk = (const __be32 *)src;
|
|
||||||
__be32 *const out_blk = (__be32 *)dst;
|
|
||||||
const u32 *P = ctx->p;
|
const u32 *P = ctx->p;
|
||||||
const u32 *S = ctx->s;
|
const u32 *S = ctx->s;
|
||||||
u32 yl = be32_to_cpu(in_blk[0]);
|
u32 yl = get_unaligned_be32(src);
|
||||||
u32 yr = be32_to_cpu(in_blk[1]);
|
u32 yr = get_unaligned_be32(src + 4);
|
||||||
|
|
||||||
ROUND(yr, yl, 17);
|
ROUND(yr, yl, 17);
|
||||||
ROUND(yl, yr, 16);
|
ROUND(yl, yr, 16);
|
||||||
@@ -97,8 +93,8 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
|
|||||||
yl ^= P[1];
|
yl ^= P[1];
|
||||||
yr ^= P[0];
|
yr ^= P[0];
|
||||||
|
|
||||||
out_blk[0] = cpu_to_be32(yr);
|
put_unaligned_be32(yr, dst);
|
||||||
out_blk[1] = cpu_to_be32(yl);
|
put_unaligned_be32(yl, dst + 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct crypto_alg alg = {
|
static struct crypto_alg alg = {
|
||||||
@@ -108,7 +104,6 @@ static struct crypto_alg alg = {
|
|||||||
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
|
||||||
.cra_blocksize = BF_BLOCK_SIZE,
|
.cra_blocksize = BF_BLOCK_SIZE,
|
||||||
.cra_ctxsize = sizeof(struct bf_ctx),
|
.cra_ctxsize = sizeof(struct bf_ctx),
|
||||||
.cra_alignmask = 3,
|
|
||||||
.cra_module = THIS_MODULE,
|
.cra_module = THIS_MODULE,
|
||||||
.cra_u = { .cipher = {
|
.cra_u = { .cipher = {
|
||||||
.cia_min_keysize = BF_MIN_KEY_SIZE,
|
.cia_min_keysize = BF_MIN_KEY_SIZE,
|
||||||
|
Loading…
Reference in New Issue
Block a user