// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "atmel-aes-regs.h"
#include "atmel-authenc.h"

#define ATMEL_AES_PRIORITY	300

#define ATMEL_AES_BUFFER_ORDER	2
#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

#define SIZE_IN_WORDS(x)	((x) >> 2)

/* AES flags */
/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS

#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
				 AES_FLAGS_ENCRYPT |		\
				 AES_FLAGS_GTAGEN)

#define AES_FLAGS_BUSY		BIT(3)
#define AES_FLAGS_DUMP_REG	BIT(4)
#define AES_FLAGS_OWN_SHA	BIT(5)

#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY

#define ATMEL_AES_QUEUE_LENGTH	50

#define ATMEL_AES_DMA_THRESHOLD	256

struct atmel_aes_caps {
	bool	has_dualbuff;
	bool	has_cfb64;
	bool	has_gcm;
	bool	has_xts;
	bool	has_authenc;
	u32	max_burst_size;
};

struct atmel_aes_dev;

typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);

struct atmel_aes_base_ctx {
	struct atmel_aes_dev	*dd;
	atmel_aes_fn_t		start;
	int			keylen;
	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
	u16			block_size;
	bool			is_aead;
};

struct atmel_aes_ctx {
	struct atmel_aes_base_ctx	base;
};

struct atmel_aes_ctr_ctx {
	struct atmel_aes_base_ctx	base;

	__be32			iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			offset;
	struct scatterlist	src[2];
	struct scatterlist	dst[2];
	u32			blocks;
};

struct atmel_aes_gcm_ctx {
	struct atmel_aes_base_ctx	base;

	struct scatterlist	src[2];
	struct scatterlist	dst[2];

	__be32			j0[AES_BLOCK_SIZE / sizeof(u32)];
	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
	__be32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
	size_t			textlen;

	const __be32		*ghash_in;
	__be32			*ghash_out;
	atmel_aes_fn_t		ghash_resume;
};

struct atmel_aes_xts_ctx {
	struct atmel_aes_base_ctx	base;

	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
	struct crypto_skcipher	*fallback_tfm;
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_ctx {
	struct atmel_aes_base_ctx	base;
	struct atmel_sha_authenc_ctx	*auth;
};
#endif

struct atmel_aes_reqctx {
	unsigned long		mode;
	u8			lastc[AES_BLOCK_SIZE];
	struct skcipher_request	fallback_req;
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
struct atmel_aes_authenc_reqctx {
	struct atmel_aes_reqctx	base;

	struct scatterlist	src[2];
	struct scatterlist	dst[2];
	size_t			textlen;
	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];

	/* auth_req MUST be placed last. */
	struct ahash_request	auth_req;
};
#endif

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct scatterlist	*sg;
	int			nents;
	unsigned int		remainder;
	unsigned int		sg_len;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct crypto_async_request	*areq;
	struct atmel_aes_base_ctx	*ctx;

	bool			is_async;
	atmel_aes_fn_t		resume;
	atmel_aes_fn_t		cpu_transfer_complete;

	struct device		*dev;
	struct clk		*iclk;
	int			irq;

	unsigned long		flags;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	size_t			total;
	size_t			datalen;
	u32			*data;

	struct atmel_aes_dma	src;
	struct atmel_aes_dma	dst;

	size_t			buflen;
	void			*buf;
	struct scatterlist	aligned_sg;
	struct scatterlist	*real_dst;

	struct atmel_aes_caps	caps;

	u32			hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
{
	switch (offset) {
	case AES_CR:
		return "CR";

	case AES_MR:
		return "MR";

	case AES_ISR:
		return "ISR";

	case AES_IMR:
		return "IMR";

	case AES_IER:
		return "IER";

	case AES_IDR:
		return "IDR";

	case AES_KEYWR(0):
	case AES_KEYWR(1):
	case AES_KEYWR(2):
	case AES_KEYWR(3):
	case AES_KEYWR(4):
	case AES_KEYWR(5):
	case AES_KEYWR(6):
	case AES_KEYWR(7):
		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
		break;

	case AES_IDATAR(0):
	case AES_IDATAR(1):
	case AES_IDATAR(2):
	case AES_IDATAR(3):
		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
		break;

	case AES_ODATAR(0):
	case AES_ODATAR(1):
	case AES_ODATAR(2):
	case AES_ODATAR(3):
		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
		break;

	case AES_IVR(0):
	case AES_IVR(1):
	case AES_IVR(2):
	case AES_IVR(3):
		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
		break;

	case AES_AADLENR:
		return "AADLENR";

	case AES_CLENR:
		return "CLENR";

	case AES_GHASHR(0):
	case AES_GHASHR(1):
	case AES_GHASHR(2):
	case AES_GHASHR(3):
		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
		break;

	case AES_TAGR(0):
	case AES_TAGR(1):
	case AES_TAGR(2):
	case AES_TAGR(3):
		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
		break;

	case AES_CTRR:
		return "CTRR";

	case AES_GCMHR(0):
	case AES_GCMHR(1):
	case AES_GCMHR(2):
	case AES_GCMHR(3):
		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
		break;

	case AES_EMR:
		return "EMR";

	case AES_TWR(0):
	case AES_TWR(1):
	case AES_TWR(2):
	case AES_TWR(3):
		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
		break;

	case AES_ALPHAR(0):
	case AES_ALPHAR(1):
	case AES_ALPHAR(2):
	case AES_ALPHAR(3):
		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
		break;

	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}
#endif /* VERBOSE_DEBUG */

/* Shared functions */

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	return value;
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
				   u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
	if (dd->flags & AES_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
	}
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
			     u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
			      const u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
					void *value)
{
	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
					 const void *value)
{
	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
						atmel_aes_fn_t resume)
{
	u32 isr = atmel_aes_read(dd, AES_ISR);

	if (unlikely(isr & AES_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	return -EINPROGRESS;
}

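/*
 * Number of padding bytes needed to round @len up to the next multiple of
 * @block_size (a power of two): e.g. len = 20 with block_size = 16 gives
 * 12, while len = 32 gives 0.
 */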
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
	len &= block_size - 1;
	return len ? block_size - len : 0;
}

static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd;

	spin_lock_bh(&atmel_aes.lock);
	/* One AES IP per SoC. */
	aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
					  struct atmel_aes_dev, list);
	spin_unlock_bh(&atmel_aes.lock);
	return aes_dd;
}

static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);

	return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	int err;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	dd->hw_version = atmel_aes_get_version(dd);

	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
	return 0;
}

static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
				      const struct atmel_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}

static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
{
	return (dd->flags & AES_FLAGS_ENCRYPT);
}

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
#endif

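/*
 * Propagate the chaining value back into req->iv so a follow-up request
 * can continue the stream: for encryption the last ciphertext block is
 * read from the destination, for decryption it comes from rctx->lastc,
 * which atmel_aes_crypt() saved before in-place decryption could
 * overwrite the source.
 */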
static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	if (req->cryptlen < ivsize)
		return;

	if (rctx->mode & AES_FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - ivsize, ivsize, 0);
	else
		memcpy(req->iv, rctx->lastc, ivsize);
}

static inline struct atmel_aes_ctr_ctx *
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
}

static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	int i;

	/*
	 * The CTR transfer works in fragments of data of maximum 1 MByte
	 * because of the 16-bit CTR counter embedded in the IP. When reaching
	 * here, ctx->blocks contains the number of blocks of the last
	 * fragment processed, so there is no need to explicitly cast it to
	 * u16.
	 */
	for (i = 0; i < ctx->blocks; i++)
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);

	memcpy(req->iv, ctx->iv, ivsize);
}

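/*
 * Common completion path: release the clock and the BUSY flag, propagate
 * the next IV back into the request for the chained modes (ECB excluded,
 * CTR handled by the software counter update above), then let the queue
 * tasklet pick up the next pending request.
 */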
static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->ctx->is_aead)
		atmel_aes_authenc_complete(dd, err);
#endif

	clk_disable(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	if (!err && !dd->ctx->is_aead &&
	    (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
		if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
			atmel_aes_set_iv_as_last_ciphertext_block(dd);
		else
			atmel_aes_ctr_update_req_iv(dd);
	}

	if (dd->is_async)
		crypto_request_complete(dd->areq, err);

	tasklet_schedule(&dd->queue_task);

	return err;
}

static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
				     const __be32 *iv, const u32 *key, int keylen)
{
	u32 valmr = 0;

	/* MR register must be set before IV registers */
	if (keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	valmr |= dd->flags & AES_FLAGS_MODE_MASK;

	if (use_dma) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));

	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
		atmel_aes_write_block(dd, AES_IVR(0), iv);
}

static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
					const __be32 *iv)
{
	atmel_aes_write_ctrl_key(dd, use_dma, iv,
				 dd->ctx->key, dd->ctx->keylen);
}

/* CPU transfer */

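/*
 * In PIO mode the input has already been copied into the driver's bounce
 * buffer (dd->buf) and is fed to the IP one 16-byte block at a time
 * through AES_IDATAR(0..3); each completed block is read back from
 * AES_ODATAR(0..3) before the result is copied to the real destination.
 */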
static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
	int err = 0;
	u32 isr;

	for (;;) {
		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		if (dd->datalen < AES_BLOCK_SIZE)
			break;

		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_cpu_transfer;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				 dd->buf, dd->total))
		err = -EINVAL;

	if (err)
		return atmel_aes_complete(dd, err);

	return dd->cpu_transfer_complete(dd);
}

static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);

	if (unlikely(len == 0))
		return -EINVAL;

	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);

	dd->total = len;
	dd->real_dst = dst;
	dd->cpu_transfer_complete = resume;
	dd->datalen = len + padlen;
	dd->data = (u32 *)dd->buf;
	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
}

/* DMA transfer */

static void atmel_aes_dma_callback(void *data);

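/*
 * Walk @sg and decide whether @len bytes can be handed to the DMA engine
 * directly: every entry must be 32-bit aligned and, except for the final
 * one, a multiple of the block size. On success the final entry is
 * trimmed to @len and the clipped byte count is kept in dma->remainder so
 * atmel_aes_restore_sg() can undo the trim afterwards.
 */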
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
				    struct scatterlist *sg,
				    size_t len,
				    struct atmel_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, dd->ctx->block_size))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, dd->ctx->block_size))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

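/*
 * dma_map_sg() both scatterlists for the transfer; whenever source or
 * destination fails the alignment check, the pre-allocated bounce buffer
 * (dd->buf, wrapped in dd->aligned_sg) is used in its place.
 */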
static int atmel_aes_map(struct atmel_aes_dev *dd,
			 struct scatterlist *src,
			 struct scatterlist *dst,
			 size_t len)
{
	bool src_aligned, dst_aligned;
	size_t padlen;

	dd->total = len;
	dd->src.sg = src;
	dd->dst.sg = dst;
	dd->real_dst = dst;

	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
	if (!src_aligned || !dst_aligned) {
		padlen = atmel_aes_padlen(len, dd->ctx->block_size);

		if (dd->buflen < len + padlen)
			return -ENOMEM;

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
			dd->src.sg = &dd->aligned_sg;
			dd->src.nents = 1;
			dd->src.remainder = 0;
		}

		if (!dst_aligned) {
			dd->dst.sg = &dd->aligned_sg;
			dd->dst.nents = 1;
			dd->dst.remainder = 0;
		}

		sg_init_table(&dd->aligned_sg, 1);
		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
	}

	if (dd->src.sg == dd->dst.sg) {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_BIDIRECTIONAL);
		dd->dst.sg_len = dd->src.sg_len;
		if (!dd->src.sg_len)
			return -EFAULT;
	} else {
		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
					    DMA_TO_DEVICE);
		if (!dd->src.sg_len)
			return -EFAULT;

		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
					    DMA_FROM_DEVICE);
		if (!dd->dst.sg_len) {
			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
				     DMA_TO_DEVICE);
			return -EFAULT;
		}
	}

	return 0;
}

static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
	if (dd->src.sg == dd->dst.sg) {
		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_BIDIRECTIONAL);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	} else {
		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
			     DMA_FROM_DEVICE);

		if (dd->dst.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->dst);

		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
			     DMA_TO_DEVICE);

		if (dd->src.sg != &dd->aligned_sg)
			atmel_aes_restore_sg(&dd->src);
	}

	if (dd->dst.sg == &dd->aligned_sg)
		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
				    dd->buf, dd->total);
}

static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
					enum dma_slave_buswidth addr_width,
					enum dma_transfer_direction dir,
					u32 maxburst)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_async_tx_callback callback;
	struct atmel_aes_dma *dma;
	int err;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = addr_width;
	config.dst_addr_width = addr_width;
	config.src_maxburst = maxburst;
	config.dst_maxburst = maxburst;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma = &dd->src;
		callback = NULL;
		config.dst_addr = dd->phys_base + AES_IDATAR(0);
		break;

	case DMA_DEV_TO_MEM:
		dma = &dd->dst;
		callback = atmel_aes_dma_callback;
		config.src_addr = dd->phys_base + AES_ODATAR(0);
		break;

	default:
		return -EINVAL;
	}

	err = dmaengine_slave_config(dma->chan, &config);
	if (err)
		return err;

	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = callback;
	desc->callback_param = dd;
	dmaengine_submit(desc);
	dma_async_issue_pending(dma->chan);

	return 0;
}

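/*
 * The DMA bus width must match the operating mode: CFB8 and CFB16 move
 * one and two bytes per block respectively, everything else moves 32-bit
 * words. Only full AES blocks may use the larger burst size advertised
 * in dd->caps.max_burst_size.
 */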
static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       size_t len,
			       atmel_aes_fn_t resume)
{
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	int err;

	switch (dd->ctx->block_size) {
	case CFB8_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		maxburst = 1;
		break;

	case CFB16_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		maxburst = 1;
		break;

	case CFB32_BLOCK_SIZE:
	case CFB64_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = 1;
		break;

	case AES_BLOCK_SIZE:
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		maxburst = dd->caps.max_burst_size;
		break;

	default:
		err = -EINVAL;
		goto exit;
	}

	err = atmel_aes_map(dd, src, dst, len);
	if (err)
		goto exit;

	dd->resume = resume;

	/* Set output DMA transfer first */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
					   maxburst);
	if (err)
		goto unmap;

	/* Then set input DMA transfer */
	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
					   maxburst);
	if (err)
		goto output_transfer_stop;

	return -EINPROGRESS;

output_transfer_stop:
	dmaengine_terminate_sync(dd->dst.chan);
unmap:
	atmel_aes_unmap(dd);
exit:
	return atmel_aes_complete(dd, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	atmel_aes_unmap(dd);
	dd->is_async = true;
	(void)dd->resume(dd);
}

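/*
 * Enqueue @new_areq (if any) and, unless the engine is already busy,
 * dequeue the next request and run its ctx->start() handler. Returns the
 * enqueue status when the caller's request was merely queued for later,
 * or the start handler's result when it runs immediately.
 */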
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
				  struct crypto_async_request *new_areq)
{
	struct crypto_async_request *areq, *backlog;
	struct atmel_aes_base_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&dd->queue, new_areq);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	areq = crypto_dequeue_request(&dd->queue);
	if (areq)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	dd->areq = areq;
	dd->ctx = ctx;
	start_async = (areq != new_areq);
	dd->is_async = start_async;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

/* AES async block ciphers */

static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
	return atmel_aes_complete(dd, 0);
}

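/*
 * ctx->start handler for the basic modes (ECB, CBC, OFB and the CFB
 * variants): requests of at least ATMEL_AES_DMA_THRESHOLD bytes, or any
 * mode whose block size differs from AES_BLOCK_SIZE, go through DMA;
 * everything else goes through PIO.
 */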
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
			dd->ctx->block_size != AES_BLOCK_SIZE);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst,
					   req->cryptlen,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
				   atmel_aes_transfer_complete);
}

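/*
 * Process the next CTR fragment. The IP only increments the low 16 bits
 * of the counter, so a request whose block range would cross that
 * boundary is split: e.g. with the counter at 0xfff0, only 0x10 blocks
 * are transferred, then the IV is bumped in software and the remainder
 * is resubmitted through this same resume callback.
 */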
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct scatterlist *src, *dst;
	size_t datalen;
	u32 ctr;
	u16 start, end;
	bool use_dma, fragmented = false;

	/* Check for transfer completion. */
	ctx->offset += dd->total;
	if (ctx->offset >= req->cryptlen)
		return atmel_aes_transfer_complete(dd);

	/* Compute data length. */
	datalen = req->cryptlen - ctx->offset;
	ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(ctx->iv[3]);

	/* Check 16-bit counter overflow. */
	start = ctr & 0xffff;
	end = start + ctx->blocks - 1;

	if (ctx->blocks >> 16 || end < start) {
		ctr |= 0xffff;
		datalen = AES_BLOCK_SIZE * (0x10000 - start);
		fragmented = true;
	}

	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

	/* Jump to offset. */
	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));

	/* Configure hardware. */
	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		ctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
	}

	if (use_dma)
		return atmel_aes_dma_start(dd, src, dst, datalen,
					   atmel_aes_ctr_transfer);

	return atmel_aes_cpu_start(dd, src, dst, datalen,
				   atmel_aes_ctr_transfer);
}

static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
	ctx->offset = 0;
	dd->total = 0;
	return atmel_aes_ctr_transfer(dd);
}

static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
{
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
			crypto_skcipher_reqtfm(req));

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);

	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
		     crypto_skcipher_decrypt(&rctx->fallback_req);
}

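/*
 * Common entry point of all skcipher .encrypt/.decrypt callbacks: it
 * validates the request length for the requested mode, records the mode
 * flags and block size in the contexts, saves the last ciphertext block
 * for IV chaining on decryption, then hands the request to the device
 * queue.
 */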
static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct atmel_aes_reqctx *rctx;
	u32 opmode = mode & AES_FLAGS_OPMODE_MASK;

	if (opmode == AES_FLAGS_XTS) {
		if (req->cryptlen < XTS_BLOCK_SIZE)
			return -EINVAL;

		if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
			return atmel_aes_xts_fallback(req,
						      mode & AES_FLAGS_ENCRYPT);
	}

	/*
	 * ECB, CBC, CFB, OFB or CTR mode require the plaintext and ciphertext
	 * to have a positive integer length.
	 */
	if (!req->cryptlen && opmode != AES_FLAGS_XTS)
		return 0;

	if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
	    !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
		return -EINVAL;

	switch (mode & AES_FLAGS_OPMODE_MASK) {
	case AES_FLAGS_CFB8:
		ctx->block_size = CFB8_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB16:
		ctx->block_size = CFB16_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB32:
		ctx->block_size = CFB32_BLOCK_SIZE;
		break;

	case AES_FLAGS_CFB64:
		ctx->block_size = CFB64_BLOCK_SIZE;
		break;

	default:
		ctx->block_size = AES_BLOCK_SIZE;
		break;
	}
	ctx->is_aead = false;

	rctx = skcipher_request_ctx(req);
	rctx->mode = mode;

	if (opmode != AES_FLAGS_ECB &&
	    !(mode & AES_FLAGS_ENCRYPT)) {
		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

		if (req->cryptlen >= ivsize)
			scatterwalk_map_and_copy(rctx->lastc, req->src,
						 req->cryptlen - ivsize,
						 ivsize, 0);
	}

	return atmel_aes_handle_queue(ctx->dd, &req->base);
}

static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_start;

	return 0;
}

static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_ctr_start;

	return 0;
}

2019-11-10 01:09:33 +08:00
|
|
|
static struct skcipher_alg aes_algs[] = {
{
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "atmel-ecb-aes",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_ecb_encrypt,
	.decrypt		= atmel_aes_ecb_decrypt,
},
{
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "atmel-cbc-aes",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cbc_encrypt,
	.decrypt		= atmel_aes_cbc_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ofb(aes)",
	.base.cra_driver_name	= "atmel-ofb-aes",
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_ofb_encrypt,
	.decrypt		= atmel_aes_ofb_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "cfb(aes)",
	.base.cra_driver_name	= "atmel-cfb-aes",
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cfb_encrypt,
	.decrypt		= atmel_aes_cfb_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "cfb32(aes)",
	.base.cra_driver_name	= "atmel-cfb32-aes",
	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cfb32_encrypt,
	.decrypt		= atmel_aes_cfb32_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "cfb16(aes)",
	.base.cra_driver_name	= "atmel-cfb16-aes",
	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cfb16_encrypt,
	.decrypt		= atmel_aes_cfb16_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "cfb8(aes)",
	.base.cra_driver_name	= "atmel-cfb8-aes",
	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cfb8_encrypt,
	.decrypt		= atmel_aes_cfb8_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
{
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "atmel-ctr-aes",
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctr_ctx),

	.init			= atmel_aes_ctr_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_ctr_encrypt,
	.decrypt		= atmel_aes_ctr_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
},
};
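
/*
 * The CFB64 variant is not supported by every revision of the IP, so it
 * is kept out of aes_algs[] and registered only when dd->caps.has_cfb64
 * is set.
 */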
static struct skcipher_alg aes_cfb64_alg = {
	.base.cra_name		= "cfb64(aes)",
	.base.cra_driver_name	= "atmel-cfb64-aes",
	.base.cra_blocksize	= CFB64_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),

	.init			= atmel_aes_init_tfm,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= atmel_aes_setkey,
	.encrypt		= atmel_aes_cfb64_encrypt,
	.decrypt		= atmel_aes_cfb64_decrypt,
	.ivsize			= AES_BLOCK_SIZE,
};

/* gcm aead functions */

static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const __be32 *ghash_in, __be32 *ghash_out,
			       atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);

static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);

static inline struct atmel_aes_gcm_ctx *
atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
}
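
/*
 * atmel_aes_gcm_ghash() and its _init/_finalize continuations drive the
 * hardware GHASH engine over a buffer of whole 16-byte blocks: an
 * optional initial hash value (ghash_in) may seed the GCM Intermediate
 * Hash Word Registers, the result is read back into ghash_out, and the
 * resume callback continues the request once the hash is available.
 */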
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
			       const u32 *data, size_t datalen,
			       const __be32 *ghash_in, __be32 *ghash_out,
			       atmel_aes_fn_t resume)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	dd->data = (u32 *)data;
	dd->datalen = datalen;
	ctx->ghash_in = ghash_in;
	ctx->ghash_out = ghash_out;
	ctx->ghash_resume = resume;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
}

static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

	/* Set the data length. */
	atmel_aes_write(dd, AES_AADLENR, dd->total);
	atmel_aes_write(dd, AES_CLENR, 0);

	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
	if (ctx->ghash_in)
		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);

	return atmel_aes_gcm_ghash_finalize(dd);
}

static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	u32 isr;

	/* Write data into the Input Data Registers. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_ghash_finalize;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* Read the computed hash from GHASHRx. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);

	return ctx->ghash_resume(dd);
}
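
/*
 * Compute J0 as defined by NIST SP 800-38D: with the usual 96-bit IV,
 * J0 = IV || 0^31 || 1 and no hashing is needed; for any other IV
 * length, J0 = GHASH(IV padded to a full block, followed by a block
 * carrying the IV length in bits).
 */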
static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
	size_t ivsize = crypto_aead_ivsize(tfm);
	size_t datalen, padlen;
	const void *iv = req->iv;
	u8 *data = dd->buf;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	if (likely(ivsize == GCM_AES_IV_SIZE)) {
		memcpy(ctx->j0, iv, ivsize);
		ctx->j0[3] = cpu_to_be32(1);
		return atmel_aes_gcm_process(dd);
	}

	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
	datalen = ivsize + padlen + AES_BLOCK_SIZE;
	if (datalen > dd->buflen)
		return atmel_aes_complete(dd, -EINVAL);

	memcpy(data, iv, ivsize);
	memset(data + ivsize, 0, padlen + sizeof(u64));
	((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
				   NULL, ctx->j0, atmel_aes_gcm_process);
}

static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 authsize;

	/* Compute text length. */
	authsize = crypto_aead_authsize(tfm);
	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);

	/*
	 * According to tcrypt test suite, the GCM Automatic Tag Generation
	 * fails when both the message and its associated data are empty.
	 */
	if (likely(req->assoclen != 0 || ctx->textlen != 0))
		dd->flags |= AES_FLAGS_GTAGEN;

	atmel_aes_write_ctrl(dd, false, NULL);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
}
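
/*
 * The payload is encrypted with counter values starting at incr32(J0);
 * J0 itself is reserved for encrypting the final GHASH value into the
 * authentication tag.
 */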
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	__be32 j0_lsw, *j0 = ctx->j0;
	size_t padlen;

	/* Write incr32(J0) into IV. */
	j0_lsw = j0[3];
	be32_add_cpu(&j0[3], 1);
	atmel_aes_write_block(dd, AES_IVR(0), j0);
	j0[3] = j0_lsw;

	/* Set aad and text lengths. */
	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
	atmel_aes_write(dd, AES_CLENR, ctx->textlen);

	/* Check whether AAD are present. */
	if (unlikely(req->assoclen == 0)) {
		dd->datalen = 0;
		return atmel_aes_gcm_data(dd);
	}

	/* Copy assoc data and add padding. */
	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
	if (unlikely(req->assoclen + padlen > dd->buflen))
		return atmel_aes_complete(dd, -EINVAL);
	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);

	/* Write assoc data into the Input Data register. */
	dd->data = (u32 *)dd->buf;
	dd->datalen = req->assoclen + padlen;
	return atmel_aes_gcm_data(dd);
}

static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
	struct scatterlist *src, *dst;
	u32 isr, mr;

	/* Write AAD first. */
	while (dd->datalen > 0) {
		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
		dd->data += 4;
		dd->datalen -= AES_BLOCK_SIZE;

		isr = atmel_aes_read(dd, AES_ISR);
		if (!(isr & AES_INT_DATARDY)) {
			dd->resume = atmel_aes_gcm_data;
			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
			return -EINPROGRESS;
		}
	}

	/* GMAC only. */
	if (unlikely(ctx->textlen == 0))
		return atmel_aes_gcm_tag_init(dd);

	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));

	if (use_dma) {
		/* Update the Mode Register for DMA transfers. */
		mr = atmel_aes_read(dd, AES_MR);
		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
		mr |= AES_MR_SMOD_IDATAR0;
		if (dd->caps.has_dualbuff)
			mr |= AES_MR_DUALBUFF;
		atmel_aes_write(dd, AES_MR, mr);

		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
					   atmel_aes_gcm_tag_init);
	}

	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
				   atmel_aes_gcm_tag_init);
}
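
/*
 * With Automatic Tag Generation enabled, the hardware raises TAGRDY
 * once the tag can be read from the TAGRx registers.  In the
 * empty-message corner case the tag is computed by hand instead:
 * hash the final len(A) || len(C) block, then encrypt the resulting
 * GHASH value in CTR mode using J0 as the counter.
 */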
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	__be64 *data = dd->buf;

	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
			dd->resume = atmel_aes_gcm_tag_init;
			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
			return -EINPROGRESS;
		}

		return atmel_aes_gcm_finalize(dd);
	}

	/* Read the GCM Intermediate Hash Word Registers. */
	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);

	data[0] = cpu_to_be64(req->assoclen * 8);
	data[1] = cpu_to_be64(ctx->textlen * 8);

	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
}

static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	unsigned long flags;

	/*
	 * Change mode to CTR to complete the tag generation.
	 * Use J0 as Initialization Vector.
	 */
	flags = dd->flags;
	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
	dd->flags |= AES_FLAGS_CTR;
	atmel_aes_write_ctrl(dd, false, ctx->j0);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
}

static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
{
	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
	struct aead_request *req = aead_request_cast(dd->areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 offset, authsize, itag[4], *otag = ctx->tag;
	int err;

	/* Read the computed tag. */
	if (likely(dd->flags & AES_FLAGS_GTAGEN))
		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
	else
		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);

	offset = req->assoclen + ctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
		err = 0;
	} else {
		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
	}

	return atmel_aes_complete(dd, err);
}

static int atmel_aes_gcm_crypt(struct aead_request *req,
			       unsigned long mode)
{
	struct atmel_aes_base_ctx *ctx;
	struct atmel_aes_reqctx *rctx;

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	rctx = aead_request_ctx(req);
	rctx->mode = AES_FLAGS_GCM | mode;

	return atmel_aes_handle_queue(ctx->dd, &req->base);
}

static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_256 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
				     unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

static int atmel_aes_gcm_encrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_gcm_decrypt(struct aead_request *req)
{
	return atmel_aes_gcm_crypt(req, 0);
}

static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_gcm_start;

	return 0;
}

static struct aead_alg aes_gcm_alg = {
	.setkey		= atmel_aes_gcm_setkey,
	.setauthsize	= atmel_aes_gcm_setauthsize,
	.encrypt	= atmel_aes_gcm_encrypt,
	.decrypt	= atmel_aes_gcm_decrypt,
	.init		= atmel_aes_gcm_init,
	.ivsize		= GCM_AES_IV_SIZE,
	.maxauthsize	= AES_BLOCK_SIZE,

	.base = {
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "atmel-gcm-aes",
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
	},
};

/* xts functions */

static inline struct atmel_aes_xts_ctx *
atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
	return container_of(ctx, struct atmel_aes_xts_ctx, base);
}
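
/*
 * XTS splits the user key in two: the second half (key2) encrypts the
 * IV in ECB mode to produce the initial tweak, while the first half
 * (base.key) is used for the data itself.
 */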
static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);

static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
	unsigned long flags;
	int err;

	atmel_aes_set_mode(dd, rctx);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	/* Compute the tweak value from req->iv with ecb(aes). */
	flags = dd->flags;
	dd->flags &= ~AES_FLAGS_MODE_MASK;
	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
	atmel_aes_write_ctrl_key(dd, false, NULL,
				 ctx->key2, ctx->base.keylen);
	dd->flags = flags;

	atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}

static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
	struct skcipher_request *req = skcipher_request_cast(dd->areq);
	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
	static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
	u8 *tweak_bytes = (u8 *)tweak;
	int i;

	/* Read the computed ciphered tweak value. */
	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
	/*
	 * Hardware quirk:
	 * the order of the ciphered tweak bytes needs to be reversed before
	 * writing them into the TWRx registers.
	 */
	for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
		swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);

	/* Process the data. */
	atmel_aes_write_ctrl(dd, use_dma, NULL);
	atmel_aes_write_block(dd, AES_TWR(0), tweak);
	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
	if (use_dma)
		return atmel_aes_dma_start(dd, req->src, req->dst,
					   req->cryptlen,
					   atmel_aes_transfer_complete);

	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
				   atmel_aes_transfer_complete);
}

static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int keylen)
{
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
	if (err)
		return err;

	memcpy(ctx->base.key, key, keylen/2);
	memcpy(ctx->key2, key + keylen/2, keylen/2);
	ctx->base.keylen = keylen/2;

	return 0;
}

static int atmel_aes_xts_encrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_xts_decrypt(struct skcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_XTS);
}
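
/*
 * The tfm is created with CRYPTO_ALG_NEED_FALLBACK: a software
 * implementation of the same algorithm is allocated next to the
 * hardware context and its key is kept in sync by
 * atmel_aes_xts_setkey(), so requests the hardware cannot process
 * directly can be handed over to it.
 */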
static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct atmel_aes_dev *dd;
	const char *tfm_name = crypto_tfm_alg_name(&tfm->base);

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
				    crypto_skcipher_reqsize(ctx->fallback_tfm));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_xts_start;

	return 0;
}

static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback_tfm);
}

static struct skcipher_alg aes_xts_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "atmel-xts-aes",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct atmel_aes_xts_ctx),
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= atmel_aes_xts_setkey,
	.encrypt		= atmel_aes_xts_encrypt,
	.decrypt		= atmel_aes_xts_decrypt,
	.init			= atmel_aes_xts_init_tfm,
	.exit			= atmel_aes_xts_exit_tfm,
};

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
/* authenc aead functions */

static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
				  bool is_async);
static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
				      bool is_async);
static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
				   bool is_async);

static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
		atmel_sha_authenc_abort(&rctx->auth_req);
	dd->flags &= ~AES_FLAGS_OWN_SHA;
}
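
/*
 * authenc(hmac(shaX),cbc(aes)) chains the AES and SHA hardware blocks:
 * the AES output is piped into the SHA device (AES_EMR_PLIPEN), so a
 * request must first take ownership of the SHA device through
 * atmel_sha_authenc_schedule() before any data is transferred.
 */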
static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	int err;

	atmel_aes_set_mode(dd, &rctx->base);

	err = atmel_aes_hw_init(dd);
	if (err)
		return atmel_aes_complete(dd, err);

	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
					  atmel_aes_authenc_init, dd);
}

static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
				  bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	if (is_async)
		dd->is_async = true;
	if (err)
		return atmel_aes_complete(dd, err);

	/* If here, we've got the ownership of the SHA device. */
	dd->flags |= AES_FLAGS_OWN_SHA;

	/* Configure the SHA device. */
	return atmel_sha_authenc_init(&rctx->auth_req,
				      req->src, req->assoclen,
				      rctx->textlen,
				      atmel_aes_authenc_transfer, dd);
}

static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
				      bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	bool enc = atmel_aes_is_encrypt(dd);
	struct scatterlist *src, *dst;
	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	u32 emr;

	if (is_async)
		dd->is_async = true;
	if (err)
		return atmel_aes_complete(dd, err);

	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
	dst = src;

	if (req->src != req->dst)
		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);

	/* Configure the AES device. */
	memcpy(iv, req->iv, sizeof(iv));

	/*
	 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to
	 * 'true' even if the data transfer is actually performed by the CPU (so
	 * not by the DMA) because we must force the AES_MR_SMOD bitfield to the
	 * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD
	 * must be set to *_MR_SMOD_IDATAR0.
	 */
	atmel_aes_write_ctrl(dd, true, iv);
	emr = AES_EMR_PLIPEN;
	if (!enc)
		emr |= AES_EMR_PLIPD;
	atmel_aes_write(dd, AES_EMR, emr);

	/* Transfer data. */
	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
				   atmel_aes_authenc_digest);
}

static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);

	/* atmel_sha_authenc_final() releases the SHA device. */
	dd->flags &= ~AES_FLAGS_OWN_SHA;
	return atmel_sha_authenc_final(&rctx->auth_req,
				       rctx->digest, sizeof(rctx->digest),
				       atmel_aes_authenc_final, dd);
}

static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
				   bool is_async)
{
	struct aead_request *req = aead_request_cast(dd->areq);
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool enc = atmel_aes_is_encrypt(dd);
	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
	u32 offs, authsize;

	if (is_async)
		dd->is_async = true;
	if (err)
		goto complete;

	offs = req->assoclen + rctx->textlen;
	authsize = crypto_aead_authsize(tfm);
	if (enc) {
		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
	} else {
		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
		if (crypto_memneq(idigest, odigest, authsize))
			err = -EBADMSG;
	}

complete:
	return atmel_aes_complete(dd, err);
}

static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->base.key))
		goto badkey;

	/* Save auth key. */
	err = atmel_sha_authenc_setkey(ctx->auth,
				       keys.authkey, keys.authkeylen,
				       crypto_aead_get_flags(tfm));
	if (err) {
		memzero_explicit(&keys, sizeof(keys));
		return err;
	}

	/* Save enc key. */
	ctx->base.keylen = keys.enckeylen;
	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
				      unsigned long auth_mode)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
	struct atmel_aes_dev *dd;

	dd = atmel_aes_dev_alloc(&ctx->base);
	if (!dd)
		return -ENODEV;

	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
	if (IS_ERR(ctx->auth))
		return PTR_ERR(ctx->auth);

	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
				      auth_reqsize));
	ctx->base.dd = dd;
	ctx->base.start = atmel_aes_authenc_start;

	return 0;
}

static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
}

static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
}

static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
}

static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
}

static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
{
	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
}

static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
{
	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);

	atmel_sha_authenc_free(ctx->auth);
}

static int atmel_aes_authenc_crypt(struct aead_request *req,
				   unsigned long mode)
{
	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
	u32 authsize = crypto_aead_authsize(tfm);
	bool enc = (mode & AES_FLAGS_ENCRYPT);

	/* Compute text length. */
	if (!enc && req->cryptlen < authsize)
		return -EINVAL;
	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);

	/*
	 * Empty messages are not supported yet: the SHA auto-padding can be
	 * used only on non-empty messages, so a special case would need to be
	 * implemented for them.
	 */
	if (!rctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->base.mode = mode;
	ctx->block_size = AES_BLOCK_SIZE;
	ctx->is_aead = true;

	return atmel_aes_handle_queue(ctx->dd, &req->base);
}

static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
{
	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
{
	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
}

static struct aead_alg aes_authenc_algs[] = {
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA1_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA224_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA256_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA384_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
{
	.setkey		= atmel_aes_authenc_setkey,
	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
	.exit		= atmel_aes_authenc_exit_tfm,
	.ivsize		= AES_BLOCK_SIZE,
	.maxauthsize	= SHA512_DIGEST_SIZE,

	.base = {
		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
	},
},
};
#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */

/* Probe functions */
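
/*
 * dd->buf is a single bounce buffer used to stage the GCM IV material,
 * padded AAD and CPU-driven transfers; its length is rounded down to a
 * multiple of AES_BLOCK_SIZE so that only whole blocks ever reach the
 * Input Data Registers.
 */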
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
	dd->buflen = ATMEL_AES_BUFFER_SIZE;
	dd->buflen &= ~(AES_BLOCK_SIZE - 1);

	if (!dd->buf) {
		dev_err(dd->dev, "unable to alloc pages.\n");
		return -ENOMEM;
	}

	return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
	/* Free the whole ATMEL_AES_BUFFER_ORDER allocation, not one page. */
	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}

static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
	int ret;

	/* Try to grab 2 DMA channels */
	dd->src.chan = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->src.chan)) {
		ret = PTR_ERR(dd->src.chan);
		goto err_dma_in;
	}

	dd->dst.chan = dma_request_chan(dd->dev, "rx");
	if (IS_ERR(dd->dst.chan)) {
		ret = PTR_ERR(dd->dst.chan);
		goto err_dma_out;
	}

	return 0;

err_dma_out:
	dma_release_channel(dd->src.chan);
err_dma_in:
	dev_err(dd->dev, "no DMA channel available\n");
	return ret;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dst.chan);
	dma_release_channel(dd->src.chan);
}

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}
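
/*
 * The interrupt handler only acknowledges and masks the pending bits;
 * the actual state-machine step runs in the done tasklet, so the
 * resume callbacks execute outside hard-IRQ context.
 */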
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->caps.has_authenc)
		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
			crypto_unregister_aead(&aes_authenc_algs[i]);
#endif

	if (dd->caps.has_xts)
		crypto_unregister_skcipher(&aes_xts_alg);

	if (dd->caps.has_gcm)
		crypto_unregister_aead(&aes_gcm_alg);

	if (dd->caps.has_cfb64)
		crypto_unregister_skcipher(&aes_cfb64_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_skcipher(&aes_algs[i]);
}

static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
{
	alg->cra_flags |= CRYPTO_ALG_ASYNC;
	alg->cra_alignmask = 0xf;
	alg->cra_priority = ATMEL_AES_PRIORITY;
	alg->cra_module = THIS_MODULE;
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		atmel_aes_crypto_alg_init(&aes_algs[i].base);

		err = crypto_register_skcipher(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	if (dd->caps.has_cfb64) {
		atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);

		err = crypto_register_skcipher(&aes_cfb64_alg);
		if (err)
			goto err_aes_cfb64_alg;
	}

	if (dd->caps.has_gcm) {
		atmel_aes_crypto_alg_init(&aes_gcm_alg.base);

		err = crypto_register_aead(&aes_gcm_alg);
		if (err)
			goto err_aes_gcm_alg;
	}

	if (dd->caps.has_xts) {
		atmel_aes_crypto_alg_init(&aes_xts_alg.base);

		err = crypto_register_skcipher(&aes_xts_alg);
		if (err)
			goto err_aes_xts_alg;
	}

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (dd->caps.has_authenc) {
		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
			atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);

			err = crypto_register_aead(&aes_authenc_algs[i]);
			if (err)
				goto err_aes_authenc_alg;
		}
	}
#endif

	return 0;

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
	for (j = 0; j < i; j++)
		crypto_unregister_aead(&aes_authenc_algs[j]);
	crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
	crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
	crypto_unregister_skcipher(&aes_cfb64_alg);
err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&aes_algs[j]);

	return err;
}
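/*
 * Capabilities are keyed off the major revision of the IP, taken from
 * the version register at probe time; unknown revisions fall back to
 * the minimal feature set.
 */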
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
	dd->caps.has_dualbuff = 0;
	dd->caps.has_cfb64 = 0;
	dd->caps.has_gcm = 0;
	dd->caps.has_xts = 0;
	dd->caps.has_authenc = 0;
	dd->caps.max_burst_size = 1;

	/* keep only major version number */
	switch (dd->hw_version & 0xff0) {
	case 0x700:
	case 0x600:
	case 0x500:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.has_xts = 1;
		dd->caps.has_authenc = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x200:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.has_gcm = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x130:
		dd->caps.has_dualbuff = 1;
		dd->caps.has_cfb64 = 1;
		dd->caps.max_burst_size = 4;
		break;
	case 0x120:
		break;
	default:
		dev_warn(dd->dev,
			 "Unmanaged aes version, set minimum capabilities\n");
		break;
	}
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
	{ .compatible = "atmel,at91sam9g46-aes" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
#endif
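
/*
 * Probe order matters here: the clock must be prepared before the
 * version register can be read, capabilities depend on that version,
 * and when the authenc glue is enabled probing is deferred until the
 * SHA driver is ready.
 */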
static int atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	int err;

	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
	if (!aes_dd)
		return -ENOMEM;

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);
	spin_lock_init(&aes_dd->lock);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
		     (unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
		     (unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
	if (IS_ERR(aes_dd->io_base)) {
		err = PTR_ERR(aes_dd->io_base);
		goto err_tasklet_kill;
	}
	aes_dd->phys_base = aes_res->start;

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		err = aes_dd->irq;
		goto err_tasklet_kill;
	}

	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
			       IRQF_SHARED, "atmel-aes", aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto err_tasklet_kill;
	}

	/* Initializing the clock */
	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto err_tasklet_kill;
	}

	err = clk_prepare(aes_dd->iclk);
	if (err)
		goto err_tasklet_kill;

	err = atmel_aes_hw_version_init(aes_dd);
	if (err)
		goto err_iclk_unprepare;

	atmel_aes_get_cap(aes_dd);

#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
		err = -EPROBE_DEFER;
		goto err_iclk_unprepare;
	}
#endif

	err = atmel_aes_buff_init(aes_dd);
	if (err)
		goto err_iclk_unprepare;

	err = atmel_aes_dma_init(aes_dd);
	if (err)
		goto err_buff_cleanup;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
			dma_chan_name(aes_dd->src.chan),
			dma_chan_name(aes_dd->dst.chan));

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_buff_cleanup:
	atmel_aes_buff_cleanup(aes_dd);
err_iclk_unprepare:
	clk_unprepare(aes_dd->iclk);
err_tasklet_kill:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);

	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);
	atmel_aes_buff_cleanup(aes_dd);

	clk_unprepare(aes_dd->iclk);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= atmel_aes_remove,
	.driver		= {
		.name	= "atmel_aes",
		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");