crypto: x86/aes-gcm - fix PREEMPT_RT issue in gcm_crypt()

On PREEMPT_RT, kfree() takes sleeping locks and must not be called with
preemption disabled.  Therefore, on PREEMPT_RT skcipher_walk_done() must
not be called from within a kernel_fpu_{begin,end}() pair, even when it
is the last call, which is guaranteed not to allocate memory.
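
For illustration, the problematic shape looks roughly like this (a
sketch, not the actual call chain; skcipher_walk_done() can end up
freeing the walk's buffers with kfree()):

	kernel_fpu_begin();			/* disables preemption */
	/* ... SIMD en/decryption ... */
	err = skcipher_walk_done(&walk, 0);	/* may reach kfree(): sleeping locks on RT */
	/* ... more SIMD work ... */
	kernel_fpu_end();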

Fix this by moving the last skcipher_walk_done() in gcm_crypt() to the
end of the function so that it goes after the kernel_fpu_end().  To make
this work cleanly, rework the data processing loop to handle only
non-last data segments.
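
The resulting shape of gcm_crypt() is roughly the following (a condensed
sketch of the diff below, with the per-segment processing elided):

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes) < walk.total) {	/* non-last segments */
		/* en/decrypt a multiple of AES_BLOCK_SIZE bytes */
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		if (err)
			return err;
		kernel_fpu_begin();
	}
	/* last segment: process all remaining data, then finalize the tag */
	kernel_fpu_end();
	if (nbytes)
		skcipher_walk_done(&walk, 0);	/* safe: FPU section already ended */
	return err;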

Fixes: b06affb1cb ("crypto: x86/aes-gcm - add VAES and AVX512 / AVX10 optimized AES-GCM")
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Closes: https://lore.kernel.org/linux-crypto/20240802102333.itejxOsJ@linutronix.de
Signed-off-by: Eric Biggers <ebiggers@google.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

@@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags)
 		err = skcipher_walk_aead_encrypt(&walk, req, false);
 	else
 		err = skcipher_walk_aead_decrypt(&walk, req, false);
+	if (err)
+		return err;
 
 	/*
 	 * Since the AES-GCM assembly code requires that at least three assembly
@@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags)
 	gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
 
 	/* En/decrypt the data and pass the ciphertext through GHASH. */
-	while ((nbytes = walk.nbytes) != 0) {
-		if (unlikely(nbytes < walk.total)) {
-			/*
-			 * Non-last segment.  In this case, the assembly
-			 * function requires that the length be a multiple of 16
-			 * (AES_BLOCK_SIZE) bytes.  The needed buffering of up
-			 * to 16 bytes is handled by the skcipher_walk.  Here we
-			 * just need to round down to a multiple of 16.
-			 */
-			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
-			aes_gcm_update(key, le_ctr, ghash_acc,
-				       walk.src.virt.addr, walk.dst.virt.addr,
-				       nbytes, flags);
-			le_ctr[0] += nbytes / AES_BLOCK_SIZE;
-			kernel_fpu_end();
-			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-			kernel_fpu_begin();
-		} else {
-			/* Last segment: process all remaining data. */
-			aes_gcm_update(key, le_ctr, ghash_acc,
-				       walk.src.virt.addr, walk.dst.virt.addr,
-				       nbytes, flags);
-			err = skcipher_walk_done(&walk, 0);
-			/*
-			 * The low word of the counter isn't used by the
-			 * finalize, so there's no need to increment it here.
-			 */
-		}
+	while (unlikely((nbytes = walk.nbytes) < walk.total)) {
+		/*
+		 * Non-last segment.  In this case, the assembly function
+		 * requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
+		 * bytes.  The needed buffering of up to 16 bytes is handled by
+		 * the skcipher_walk.  Here we just need to round down to a
+		 * multiple of 16.
+		 */
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+		aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+			       walk.dst.virt.addr, nbytes, flags);
+		le_ctr[0] += nbytes / AES_BLOCK_SIZE;
+		kernel_fpu_end();
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		if (err)
+			return err;
+		kernel_fpu_begin();
 	}
-	if (err)
-		goto out;
+	/* Last segment: process all remaining data. */
+	aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+		       walk.dst.virt.addr, nbytes, flags);
+	/*
+	 * The low word of the counter isn't used by the finalize, so there's no
+	 * need to increment it here.
+	 */
 
 	/* Finalize */
 	taglen = crypto_aead_authsize(tfm);
@@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags)
 				      datalen, tag, taglen, flags))
 			err = -EBADMSG;
 	}
 
-out:
 	kernel_fpu_end();
+	if (nbytes)
+		skcipher_walk_done(&walk, 0);
 	return err;
 }