// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

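/*
 * Per-tfm context. fc holds the CPACF function code selected at setkey
 * time; a value of 0 means the machine lacks hardware support for the
 * requested key length and the software fallback tfm is used instead.
 */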
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

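/*
 * State for walking the src/dst scatterlists of a GCM request. Data is
 * processed in place where a mapped scatterlist chunk is large enough;
 * otherwise partial blocks are collected in buf until at least a full
 * AES block (or the remainder of the request) is available.
 */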
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
			CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

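/*
 * Map the key length to a CPACF-KM function code and check whether the
 * machine provides it; if not, hand the key to the software fallback
 * cipher and route all later operations there.
 */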
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_priority		=	300,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	crypto_aes_encrypt,
			.cia_decrypt		=	crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

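/*
 * Run the request on the software fallback skcipher. The subrequest
 * lives in the request context, which was sized for the fallback's
 * request size at init time.
 */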
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-s390",
	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	ecb_aes_set_key,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

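/*
 * The KMC instruction reads the IV and key from a single parameter
 * block and updates the chaining value in place, so param.iv is copied
 * back into walk.iv after every processed chunk.
 */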
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	cbc_aes_set_key,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

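/*
 * The PCC instruction computes the initial XTS tweak from pcc_key and
 * the IV; the result is fed as the starting "init" value to KM, which
 * processes complete blocks and advances the tweak itself.
 */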
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		=	"xts(aes)",
	.base.cra_driver_name	=	"xts-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	xts_fallback_init,
	.exit			=	xts_fallback_exit,
	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	xts_aes_set_key,
	.encrypt		=	xts_aes_encrypt,
	.decrypt		=	xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

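/*
 * Pre-compute a run of successive counter blocks (at most one page) so
 * that a single KMCTR invocation can process many blocks at once.
 */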
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

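/*
 * The shared ctrblk page is taken opportunistically: if the mutex is
 * contended, fall back to processing one counter block at a time via
 * walk.iv, so concurrent requests never block on each other.
 */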
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		=	"ctr(aes)",
	.base.cra_driver_name	=	"ctr-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	1,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	ctr_aes_set_key,
	.encrypt		=	ctr_aes_crypt,
	.decrypt		=	ctr_aes_crypt,
	.chunksize		=	AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

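/*
 * Map the next non-empty scatterlist entry and clamp walk_bytes to the
 * contiguously addressable part; returns 0 when the list is exhausted.
 */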
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

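/*
 * Provide at least minbytesneeded input bytes in gw->ptr. Data is used
 * in place when a mapped chunk is large enough; otherwise bytes are
 * gathered into gw->buf across scatterlist entries.
 */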
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

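/*
 * One pass over src/dst drives the KMA instruction chunk by chunk; the
 * CPACF_KMA_LAAD/LPC flags tell the instruction when the last portion
 * of the AAD and the plain-/ciphertext, respectively, is supplied.
 */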
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

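/*
 * Register only those algorithms whose CPACF function codes the running
 * machine actually reports; everything else stays with the generic
 * software implementations.
 */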
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");