// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017, 2023
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE	16
#define PAES_MAX_KEYSIZE	MAXEP11AESKEYBLOBSIZE
#define PAES_256_PROTKEY_SIZE	(32 + 32)	/* key + verification pattern */
#define PXTS_256_PROTKEY_SIZE	(32 + 32 + 32)	/* k1 + k2 + verification pattern */

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct paes_protkey {
	u32 type;
	u32 len;
	u8 protkey[PXTS_256_PROTKEY_SIZE];
};

struct key_blob {
	/*
	 * Small keys will be stored in the keybuf. Larger keys are
	 * stored in extra allocated memory. In both cases, key points
	 * to the memory where the key is stored. The code distinguishes
	 * by checking keylen against sizeof(keybuf). See the two
	 * following helper functions.
	 */
	u8 *key;
	u8 keybuf[128];
	unsigned int keylen;
};

/*
 * make_clrkey_token() - wrap the raw key ck with pkey clearkey token
 * information.
 * @returns the size of the clearkey token
 */
static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
{
	struct clrkey_token {
		u8  type;
		u8  res0[3];
		u8  version;
		u8  res1[3];
		u32 keytype;
		u32 len;
		u8 key[];
	} __packed *token = (struct clrkey_token *)dest;

	token->type = 0x00;
	token->version = 0x02;
	token->keytype = (cklen - 8) >> 3;
	token->len = cklen;
	memcpy(token->key, ck, cklen);

	return sizeof(*token) + cklen;
}

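/*
 * _key_to_kb() - fill the key_blob from the raw key material given to
 * setkey. A clear key of 16, 24 or 32 bytes is wrapped into a pkey
 * clear-key token; any other length is treated as an opaque key blob
 * (e.g. a secure key) and stored as-is for the pkey layer to convert.
 */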
static inline int _key_to_kb(struct key_blob *kb,
			     const u8 *key,
			     unsigned int keylen)
{
	switch (keylen) {
	case 16:
	case 24:
	case 32:
		/* clear key value, prepare pkey clear key token in keybuf */
		memset(kb->keybuf, 0, sizeof(kb->keybuf));
		kb->keylen = make_clrkey_token(key, keylen, kb->keybuf);
		kb->key = kb->keybuf;
		break;
	default:
		/* other key material, let pkey handle this */
		if (keylen <= sizeof(kb->keybuf))
			kb->key = kb->keybuf;
		else {
			kb->key = kmalloc(keylen, GFP_KERNEL);
			if (!kb->key)
				return -ENOMEM;
		}
		memcpy(kb->key, key, keylen);
		kb->keylen = keylen;
		break;
	}

	return 0;
}

static inline int _xts_key_to_kb(struct key_blob *kb,
				 const u8 *key,
				 unsigned int keylen)
{
	size_t cklen = keylen / 2;

	memset(kb->keybuf, 0, sizeof(kb->keybuf));

	switch (keylen) {
	case 32:
	case 64:
		/* clear key value, prepare pkey clear key tokens in keybuf */
		kb->key = kb->keybuf;
		kb->keylen = make_clrkey_token(key, cklen, kb->key);
		kb->keylen += make_clrkey_token(key + cklen, cklen,
						kb->key + kb->keylen);
		break;
	default:
		/* other key material, let pkey handle this */
		if (keylen <= sizeof(kb->keybuf)) {
			kb->key = kb->keybuf;
		} else {
			kb->key = kmalloc(keylen, GFP_KERNEL);
			if (!kb->key)
				return -ENOMEM;
		}
		memcpy(kb->key, key, keylen);
		kb->keylen = keylen;
		break;
	}

	return 0;
}

static inline void _free_kb_keybuf(struct key_blob *kb)
{
	if (kb->key && kb->key != kb->keybuf
	    && kb->keylen > sizeof(kb->keybuf)) {
		kfree_sensitive(kb->key);
		kb->key = NULL;
	}
	memzero_explicit(kb->keybuf, sizeof(kb->keybuf));
}

struct s390_paes_ctx {
	struct key_blob kb;
	struct paes_protkey pk;
	spinlock_t pk_lock;
	unsigned long fc;
};

struct s390_pxts_ctx {
	struct key_blob kb;
	struct paes_protkey pk[2];
	spinlock_t pk_lock;
	unsigned long fc;
};

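/*
 * Convert a key blob into a protected key via the pkey layer.
 * The conversion is retried up to three times; if the pkey layer
 * reports -EBUSY (e.g. a busy crypto card) and we run in task
 * context, sleep for a second between the attempts.
 */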
static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen,
				      struct paes_protkey *pk)
{
	int i, rc = -EIO;

	/* try three times in case of busy card */
	for (i = 0; rc && i < 3; i++) {
		if (rc == -EBUSY && in_task()) {
			if (msleep_interruptible(1000))
				return -EINTR;
		}
		rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len,
				      &pk->type);
	}

	return rc;
}

static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
	struct paes_protkey pk;
	int rc;

	pk.len = sizeof(pk.protkey);
	rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk);
	if (rc)
		return rc;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(&ctx->pk, &pk, sizeof(pk));
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int rc;

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ecb_paes_set_key(ctx);
}

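/*
 * ECB en/decryption. cpacf_km() may process fewer bytes than requested,
 * typically because the protected key became invalid (e.g. the underlying
 * wrapping key changed); in that case the key blob is converted into a
 * fresh protected key and the loop continues.
 */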
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
	} param;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, &param,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return rc;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
	.base.cra_name		=	"ecb(paes)",
	.base.cra_driver_name	=	"ecb-paes-s390",
	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_paes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
	.init			=	ecb_paes_init,
	.exit			=	ecb_paes_exit,
	.min_keysize		=	PAES_MIN_KEYSIZE,
	.max_keysize		=	PAES_MAX_KEYSIZE,
	.setkey			=	ecb_paes_set_key,
	.encrypt		=	ecb_paes_encrypt,
	.decrypt		=	ecb_paes_decrypt,
};

static int cbc_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int rc;

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __cbc_paes_set_key(ctx);
}

static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[PAES_256_PROTKEY_SIZE];
	} param;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_kmc(ctx->fc | modifier, &param,
			      walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k) {
			memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
			rc = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return rc;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
	.base.cra_name		=	"cbc(paes)",
	.base.cra_driver_name	=	"cbc-paes-s390",
	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_paes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
	.init			=	cbc_paes_init,
	.exit			=	cbc_paes_exit,
	.min_keysize		=	PAES_MIN_KEYSIZE,
	.max_keysize		=	PAES_MAX_KEYSIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	cbc_paes_set_key,
	.encrypt		=	cbc_paes_encrypt,
	.decrypt		=	cbc_paes_decrypt,
};

static int xts_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

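/*
 * Convert the XTS key blob(s) into protected keys. For the classic
 * AES-128/256 protected key types the key blob holds two concatenated
 * tokens (k1 and k2) of equal size; for the full AES-XTS key types a
 * single token covers both keys and pk[1] stays unused.
 */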
static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
	struct paes_protkey pk0, pk1;
	size_t split_keylen;
	int rc;

	pk0.len = sizeof(pk0.protkey);
	pk1.len = sizeof(pk1.protkey);

	rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0);
	if (rc)
		return rc;

	switch (pk0.type) {
	case PKEY_KEYTYPE_AES_128:
	case PKEY_KEYTYPE_AES_256:
		/* second keytoken required */
		if (ctx->kb.keylen % 2)
			return -EINVAL;
		split_keylen = ctx->kb.keylen / 2;

		rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen,
					 split_keylen, &pk1);
		if (rc)
			return rc;

		if (pk0.type != pk1.type)
			return -EINVAL;
		break;
	case PKEY_KEYTYPE_AES_XTS_128:
	case PKEY_KEYTYPE_AES_XTS_256:
		/* single key */
		pk1.type = 0;
		break;
	default:
		/* unsupported protected keytype */
		return -EINVAL;
	}

	spin_lock_bh(&ctx->pk_lock);
	ctx->pk[0] = pk0;
	ctx->pk[1] = pk1;
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __xts_paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	switch (ctx->pk[0].type) {
	case PKEY_KEYTYPE_AES_128:
		fc = CPACF_KM_PXTS_128;
		break;
	case PKEY_KEYTYPE_AES_256:
		fc = CPACF_KM_PXTS_256;
		break;
	case PKEY_KEYTYPE_AES_XTS_128:
		fc = CPACF_KM_PXTS_128_FULL;
		break;
	case PKEY_KEYTYPE_AES_XTS_256:
		fc = CPACF_KM_PXTS_256_FULL;
		break;
	default:
		fc = 0;
		break;
	}

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int in_keylen)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len;
	int rc;

	if ((in_keylen == 32 || in_keylen == 64) &&
	    xts_verify_key(tfm, in_key, in_keylen))
		return -EINVAL;

	_free_kb_keybuf(&ctx->kb);
	rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen);
	if (rc)
		return rc;

	rc = __xts_paes_set_key(ctx);
	if (rc)
		return rc;

	/*
	 * It is not possible on a single protected key (e.g. full AES-XTS)
	 * to check whether k1 and k2 are the same.
	 */
	if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 ||
	    ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256)
		return 0;
	/*
	 * xts_verify_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well.
	 */
	ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
		AES_KEYSIZE_128 : AES_KEYSIZE_256;
	memcpy(ckey, ctx->pk[0].protkey, ckey_len);
	memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
	return xts_verify_key(tfm, ckey, 2 * ckey_len);
}

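/*
 * XTS en/decryption with a full AES-XTS protected key. The parameter
 * block passed to cpacf_km() holds the protected key, the tweak, the
 * "next alpha power" block (initialized to 1) and the wrapping key
 * verification pattern.
 */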
static int paes_xts_crypt_full(struct skcipher_request *req,
			       unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int keylen, offset, nbytes, n, k;
	struct {
		u8 key[64];
		u8 tweak[16];
		u8 nap[16];
		u8 wkvp[32];
	} fxts_param = {
		.nap = {0},
	};
	struct skcipher_walk walk;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen);
	memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
	       sizeof(fxts_param.wkvp));
	spin_unlock_bh(&ctx->pk_lock);
	memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak));
	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(fxts_param.key + offset, ctx->pk[0].protkey,
			       keylen);
			memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen,
			       sizeof(fxts_param.wkvp));
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

	return rc;
}

static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int keylen, offset, nbytes, n, k;
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
		u8 init[16];
	} xts_param;
	struct skcipher_walk walk;
	int rc;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;

	memset(&pcc_param, 0, sizeof(pcc_param));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	spin_lock_bh(&ctx->pk_lock);
	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
	spin_unlock_bh(&ctx->pk_lock);
	cpacf_pcc(ctx->fc, pcc_param.key + offset);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(xts_param.key + offset,
			       ctx->pk[0].protkey, keylen);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

	return rc;
}

static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (ctx->fc) {
	case CPACF_KM_PXTS_128:
	case CPACF_KM_PXTS_256:
		return paes_xts_crypt(req, modifier);
	case CPACF_KM_PXTS_128_FULL:
	case CPACF_KM_PXTS_256_FULL:
		return paes_xts_crypt_full(req, modifier);
	default:
		return -EINVAL;
	}
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
	.base.cra_name		=	"xts(paes)",
	.base.cra_driver_name	=	"xts-paes-s390",
	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_pxts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
	.init			=	xts_paes_init,
	.exit			=	xts_paes_exit,
	.min_keysize		=	2 * PAES_MIN_KEYSIZE,
	.max_keysize		=	2 * PAES_MAX_KEYSIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	xts_paes_set_key,
	.encrypt		=	xts_paes_encrypt,
	.decrypt		=	xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
	unsigned long fc;
	int rc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KMCTR_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int rc;

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ctr_paes_set_key(ctx);
}

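/*
 * Fill the counter block buffer with up to one page of consecutive
 * counter values so that a single kmctr invocation can process
 * multiple AES blocks. Returns the number of bytes prepared.
 */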
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

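/*
 * CTR en/decryption. The page-sized counter buffer is shared between
 * all tfms, so it is only used when the ctrblk_lock mutex can be taken
 * without blocking; otherwise the request is processed one block at a
 * time using the IV in place.
 */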
static int ctr_paes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct {
		u8 key[PAES_256_PROTKEY_SIZE];
	} param;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int rc, locked;

	rc = skcipher_walk_virt(&walk, req, false);
	if (rc)
		return rc;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
	spin_unlock_bh(&ctx->pk_lock);

	locked = mutex_trylock(&ctrblk_lock);

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
				walk.src.virt.addr, n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			rc = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx)) {
				if (locked)
					mutex_unlock(&ctrblk_lock);
				return skcipher_walk_done(&walk, -EIO);
			}
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		while (1) {
			if (cpacf_kmctr(ctx->fc, &param, buf,
					buf, AES_BLOCK_SIZE,
					walk.iv) == AES_BLOCK_SIZE)
				break;
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		rc = skcipher_walk_done(&walk, nbytes);
	}

	return rc;
}

static struct skcipher_alg ctr_paes_alg = {
	.base.cra_name		=	"ctr(paes)",
	.base.cra_driver_name	=	"ctr-paes-s390",
	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	=	1,
	.base.cra_ctxsize	=	sizeof(struct s390_paes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.base.cra_list		=	LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
	.init			=	ctr_paes_init,
	.exit			=	ctr_paes_exit,
	.min_keysize		=	PAES_MIN_KEYSIZE,
	.max_keysize		=	PAES_MAX_KEYSIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	ctr_paes_set_key,
	.encrypt		=	ctr_paes_crypt,
	.decrypt		=	ctr_paes_crypt,
	.chunksize		=	AES_BLOCK_SIZE,
};

static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	if (!list_empty(&alg->base.cra_list))
		crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
}

static int __init paes_s390_init(void)
{
	int rc;

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
		rc = crypto_register_skcipher(&ecb_paes_alg);
		if (rc)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
		rc = crypto_register_skcipher(&cbc_paes_alg);
		if (rc)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
		rc = crypto_register_skcipher(&xts_paes_alg);
		if (rc)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			rc = -ENOMEM;
			goto out_err;
		}
		rc = crypto_register_skcipher(&ctr_paes_alg);
		if (rc)
			goto out_err;
	}

	return 0;

out_err:
	paes_s390_fini();
	return rc;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("ecb(paes)");
MODULE_ALIAS_CRYPTO("cbc(paes)");
MODULE_ALIAS_CRYPTO("ctr(paes)");
MODULE_ALIAS_CRYPTO("xts(paes)");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");