Revert "crypto: spacc - Add SPAcc Skcipher support"

This reverts the following commits:

87a3fcf5fe
58bf99100a
3b1c9df662
8bc1bfa02e
c32f08d024
f036dd5664
c76c9ec333
5d22d37aa8
b63483b37e
2d6213bd59
fc61c658c9
cb67c924b2
06af76b46c
9f1a7ab4d3
8ebb14deef
c8981d9230

They were submitted with no device tree bindings.

Reported-by: Rob Herring <robh@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit b8fc70ab7b
parent be9c336852
Author: Herbert Xu
Date: 2024-09-04 07:09:51 +08:00

15 files changed, 0 insertions(+), 8343 deletions(-)

drivers/crypto/Kconfig
@@ -696,7 +696,6 @@ config CRYPTO_DEV_BCM_SPU
ahash, and aead algorithms with the kernel cryptographic API.
source "drivers/crypto/stm32/Kconfig"
source "drivers/crypto/dwc-spacc/Kconfig"
config CRYPTO_DEV_SAFEXCEL
tristate "Inside Secure's SafeXcel cryptographic engine driver"

drivers/crypto/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += dwc-spacc/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/

drivers/crypto/dwc-spacc/Kconfig
@@ -1,95 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_DEV_SPACC
tristate "Support for dw_spacc Security protocol accelerators"
depends on HAS_DMA
default m
help
This enables support for the HASH/CRYP/AEAD hardware accelerator
found in the dw_spacc IP.
config CRYPTO_DEV_SPACC_CIPHER
bool "Enable CIPHER functionality"
depends on CRYPTO_DEV_SPACC
default y
select CRYPTO_SKCIPHER
select CRYPTO_LIB_DES
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_ECB
select CRYPTO_CTR
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_OFB
select CRYPTO_CFB
select CRYPTO_SM4_GENERIC
select CRYPTO_CHACHA20
help
Say y to enable Cipher functionality of SPACC.
config CRYPTO_DEV_SPACC_HASH
bool "Enable HASH functionality"
depends on CRYPTO_DEV_SPACC
default y
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_HMAC
select CRYPTO_SM3
select CRYPTO_CMAC
select CRYPTO_MICHAEL_MIC
select CRYPTO_XCBC
select CRYPTO_AES
select CRYPTO_SM4_GENERIC
help
Say y to enable Hash functionality of SPACC.
config CRYPTO_DEV_SPACC_AEAD
bool "Enable AEAD functionality"
depends on CRYPTO_DEV_SPACC
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_AES
select CRYPTO_SM4_GENERIC
select CRYPTO_CHACHAPOLY1305
select CRYPTO_GCM
select CRYPTO_CCM
help
Say y to enable AEAD functionality of SPACC.
config CRYPTO_DEV_SPACC_AUTODETECT
bool "Enable Autodetect functionality"
depends on CRYPTO_DEV_SPACC
default y
help
Say y to enable Autodetect functionality.
config CRYPTO_DEV_SPACC_DEBUG_TRACE_IO
bool "Enable Trace MMIO reads/writes stats"
depends on CRYPTO_DEV_SPACC
default n
help
Say y to enable tracing of MMIO read/write stats,
used to debug and trace IO register read/write operations.
config CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT
bool "Enable Trace DDT entries stats"
default n
depends on CRYPTO_DEV_SPACC
help
Say y to enable tracing of DDT entry stats,
used to debug and trace DDT operations.
config CRYPTO_DEV_SPACC_SECURE_MODE
bool "Enable Spacc secure mode stats"
default n
depends on CRYPTO_DEV_SPACC
help
Say y to enable Spacc secure mode stats.

drivers/crypto/dwc-spacc/Makefile
@@ -1,16 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_SPACC) += snps-spacc.o
snps-spacc-objs = spacc_hal.o spacc_core.o \
spacc_manager.o spacc_interrupt.o spacc_device.o
ifeq ($(CONFIG_CRYPTO_DEV_SPACC_HASH),y)
snps-spacc-objs += spacc_ahash.o
endif
ifeq ($(CONFIG_CRYPTO_DEV_SPACC_CIPHER),y)
snps-spacc-objs += spacc_skcipher.o
endif
ifeq ($(CONFIG_CRYPTO_DEV_SPACC_AEAD),y)
snps-spacc-objs += spacc_aead.o
endif

(file diff suppressed because it is too large)

drivers/crypto/dwc-spacc/spacc_ahash.c
@@ -1,914 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/dmapool.h>
#include <crypto/sm3.h>
#include <crypto/sha1.h>
#include <crypto/sm4.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
#include <crypto/md5.h>
#include <crypto/aes.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <crypto/internal/hash.h>
#include "spacc_device.h"
#include "spacc_core.h"
#define PPP_BUF_SIZE 128
struct sdesc {
struct shash_desc shash;
char ctx[];
};
static struct dma_pool *spacc_hash_pool;
static LIST_HEAD(spacc_hash_alg_list);
static LIST_HEAD(head_sglbuf);
static DEFINE_MUTEX(spacc_hash_alg_mutex);
static struct mode_tab possible_hashes[] = {
{ .keylen[0] = 16, MODE_TAB_HASH("cmac(aes)", MAC_CMAC, 16, 16),
.sw_fb = true },
{ .keylen[0] = 48 | MODE_TAB_HASH_XCBC, MODE_TAB_HASH("xcbc(aes)",
MAC_XCBC, 16, 16), .sw_fb = true },
{ MODE_TAB_HASH("cmac(sm4)", MAC_SM4_CMAC, 16, 16), .sw_fb = true },
{ .keylen[0] = 32 | MODE_TAB_HASH_XCBC, MODE_TAB_HASH("xcbc(sm4)",
MAC_SM4_XCBC, 16, 16), .sw_fb = true },
{ MODE_TAB_HASH("hmac(md5)", HMAC_MD5, MD5_DIGEST_SIZE,
MD5_HMAC_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("md5", HASH_MD5, MD5_DIGEST_SIZE,
MD5_HMAC_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("hmac(sha1)", HMAC_SHA1, SHA1_DIGEST_SIZE,
SHA1_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha1", HASH_SHA1, SHA1_DIGEST_SIZE,
SHA1_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha224", HASH_SHA224, SHA224_DIGEST_SIZE,
SHA224_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha256", HASH_SHA256, SHA256_DIGEST_SIZE,
SHA256_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha384", HASH_SHA384, SHA384_DIGEST_SIZE,
SHA384_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha512", HASH_SHA512, SHA512_DIGEST_SIZE,
SHA512_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("hmac(sha512)", HMAC_SHA512, SHA512_DIGEST_SIZE,
SHA512_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("hmac(sha224)", HMAC_SHA224, SHA224_DIGEST_SIZE,
SHA224_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("hmac(sha256)", HMAC_SHA256, SHA256_DIGEST_SIZE,
SHA256_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("hmac(sha384)", HMAC_SHA384, SHA384_DIGEST_SIZE,
SHA384_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha3-224", HASH_SHA3_224, SHA3_224_DIGEST_SIZE,
SHA3_224_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha3-256", HASH_SHA3_256, SHA3_256_DIGEST_SIZE,
SHA3_256_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha3-384", HASH_SHA3_384, SHA3_384_DIGEST_SIZE,
SHA3_384_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sha3-512", HASH_SHA3_512, SHA3_512_DIGEST_SIZE,
SHA3_512_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("hmac(sm3)", HMAC_SM3, SM3_DIGEST_SIZE,
SM3_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("sm3", HASH_SM3, SM3_DIGEST_SIZE,
SM3_BLOCK_SIZE), .sw_fb = true },
{ MODE_TAB_HASH("michael_mic", MAC_MICHAEL, 8, 8), .sw_fb = true },
};
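/*
 * Each entry above is built by the MODE_TAB_HASH helper from
 * spacc_device.h; as a rough sketch, the "sha256" row expands to:
 *
 *   { .name = "sha256", .id = CRYPTO_MODE_HASH_SHA256,
 *     .blocklen = SHA256_BLOCK_SIZE, .hashlen = SHA256_DIGEST_SIZE,
 *     .testlen = SHA256_DIGEST_SIZE, .sw_fb = true },
 */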
static void spacc_hash_cleanup_dma_dst(struct spacc_crypto_ctx *tctx,
struct ahash_request *req)
{
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
pdu_ddt_free(&ctx->dst);
}
static void spacc_hash_cleanup_dma_src(struct spacc_crypto_ctx *tctx,
struct ahash_request *req)
{
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
if (tctx->tmp_sgl && tctx->tmp_sgl[0].length != 0) {
dma_unmap_sg(tctx->dev, tctx->tmp_sgl, ctx->src_nents,
DMA_TO_DEVICE);
kfree(tctx->tmp_sgl_buff);
tctx->tmp_sgl_buff = NULL;
tctx->tmp_sgl[0].length = 0;
} else {
dma_unmap_sg(tctx->dev, req->src, ctx->src_nents,
DMA_TO_DEVICE);
}
pdu_ddt_free(&ctx->src);
}
static void spacc_hash_cleanup_dma(struct device *dev,
struct ahash_request *req)
{
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
dma_unmap_sg(dev, req->src, ctx->src_nents, DMA_TO_DEVICE);
pdu_ddt_free(&ctx->src);
dma_pool_free(spacc_hash_pool, ctx->digest_buf, ctx->digest_dma);
pdu_ddt_free(&ctx->dst);
}
static void spacc_init_calg(struct crypto_alg *calg,
const struct mode_tab *mode)
{
strscpy(calg->cra_name, mode->name);
calg->cra_name[sizeof(mode->name) - 1] = '\0';
strscpy(calg->cra_driver_name, "spacc-");
strcat(calg->cra_driver_name, mode->name);
calg->cra_driver_name[sizeof(calg->cra_driver_name) - 1] = '\0';
calg->cra_blocksize = mode->blocklen;
}
static int spacc_ctx_clone_handle(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
if (tctx->handle < 0)
return -ENXIO;
ctx->acb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle,
&ctx->acb);
if (ctx->acb.new_handle < 0) {
spacc_hash_cleanup_dma(tctx->dev, req);
return -ENOMEM;
}
ctx->acb.tctx = tctx;
ctx->acb.ctx = ctx;
ctx->acb.req = req;
ctx->acb.spacc = &priv->spacc;
return 0;
}
static int spacc_hash_init_dma(struct device *dev, struct ahash_request *req,
int final)
{
int rc = -1;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
gfp_t mflags = GFP_ATOMIC;
if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
mflags = GFP_KERNEL;
ctx->digest_buf = dma_pool_alloc(spacc_hash_pool, mflags,
&ctx->digest_dma);
if (!ctx->digest_buf)
return -ENOMEM;
rc = pdu_ddt_init(&ctx->dst, 1 | 0x80000000);
if (rc < 0) {
pr_err("ERR: PDU DDT init error\n");
rc = -EIO;
goto err_free_digest;
}
pdu_ddt_add(&ctx->dst, ctx->digest_dma, SPACC_MAX_DIGEST_SIZE);
if (ctx->total_nents > 0 && ctx->single_shot) {
/* single shot */
spacc_ctx_clone_handle(req);
if (req->nbytes) {
rc = spacc_sg_to_ddt(dev, req->src, req->nbytes,
&ctx->src, DMA_TO_DEVICE);
} else {
memset(tctx->tmp_buffer, '\0', PPP_BUF_SIZE);
sg_set_buf(&(tctx->tmp_sgl[0]), tctx->tmp_buffer,
PPP_BUF_SIZE);
rc = spacc_sg_to_ddt(dev, &(tctx->tmp_sgl[0]),
tctx->tmp_sgl[0].length,
&ctx->src, DMA_TO_DEVICE);
}
} else if (ctx->total_nents == 0 && req->nbytes == 0) {
spacc_ctx_clone_handle(req);
/* zero length case */
memset(tctx->tmp_buffer, '\0', PPP_BUF_SIZE);
sg_set_buf(&(tctx->tmp_sgl[0]), tctx->tmp_buffer, PPP_BUF_SIZE);
rc = spacc_sg_to_ddt(dev, &(tctx->tmp_sgl[0]),
tctx->tmp_sgl[0].length,
&ctx->src, DMA_TO_DEVICE);
}
if (rc < 0)
goto err_free_dst;
ctx->src_nents = rc;
return rc;
err_free_dst:
pdu_ddt_free(&ctx->dst);
err_free_digest:
dma_pool_free(spacc_hash_pool, ctx->digest_buf, ctx->digest_dma);
return rc;
}
static void spacc_free_mems(struct spacc_crypto_reqctx *ctx,
struct spacc_crypto_ctx *tctx,
struct ahash_request *req)
{
spacc_hash_cleanup_dma_dst(tctx, req);
spacc_hash_cleanup_dma_src(tctx, req);
if (ctx->single_shot) {
kfree(tctx->tmp_sgl);
tctx->tmp_sgl = NULL;
ctx->single_shot = 0;
if (ctx->total_nents)
ctx->total_nents = 0;
}
}
static void spacc_digest_cb(void *spacc, void *tfm)
{
struct ahash_cb_data *cb = tfm;
int err = -1;
int dig_sz;
dig_sz = crypto_ahash_digestsize(crypto_ahash_reqtfm(cb->req));
if (cb->ctx->single_shot)
memcpy(cb->req->result, cb->ctx->digest_buf, dig_sz);
else
memcpy(cb->tctx->digest_ctx_buf, cb->ctx->digest_buf, dig_sz);
err = cb->spacc->job[cb->new_handle].job_err;
dma_pool_free(spacc_hash_pool, cb->ctx->digest_buf,
cb->ctx->digest_dma);
spacc_free_mems(cb->ctx, cb->tctx, cb->req);
spacc_close(cb->spacc, cb->new_handle);
if (cb->req->base.complete)
ahash_request_complete(cb->req, err);
}
static int do_shash(unsigned char *name, unsigned char *result,
const u8 *data1, unsigned int data1_len,
const u8 *data2, unsigned int data2_len,
const u8 *key, unsigned int key_len)
{
int rc;
unsigned int size;
struct crypto_shash *hash;
struct sdesc *sdesc;
hash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(hash)) {
rc = PTR_ERR(hash);
pr_err("ERR: Crypto %s allocation error %d\n", name, rc);
return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
sdesc = kmalloc(size, GFP_KERNEL);
if (!sdesc) {
rc = -ENOMEM;
goto do_shash_err;
}
sdesc->shash.tfm = hash;
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
if (rc) {
pr_err("ERR: Could not setkey %s shash\n", name);
goto do_shash_err;
}
}
rc = crypto_shash_init(&sdesc->shash);
if (rc) {
pr_err("ERR: Could not init %s shash\n", name);
goto do_shash_err;
}
rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
if (rc) {
pr_err("ERR: Could not update1\n");
goto do_shash_err;
}
if (data2 && data2_len) {
rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
if (rc) {
pr_err("ERR: Could not update2\n");
goto do_shash_err;
}
}
rc = crypto_shash_final(&sdesc->shash, result);
if (rc)
pr_err("ERR: Could not generate %s hash\n", name);
do_shash_err:
crypto_free_shash(hash);
kfree(sdesc);
return rc;
}
static int spacc_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
int rc;
const struct spacc_alg *salg = spacc_tfm_ahash(&tfm->base);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(tfm);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
unsigned int digest_size, block_size;
char hash_alg[CRYPTO_MAX_ALG_NAME];
block_size = crypto_tfm_alg_blocksize(&tfm->base);
digest_size = crypto_ahash_digestsize(tfm);
/*
* We will not use the hardware in case of HMACs
* This was meant for hashes but it works for cmac/xcbc since we
* only intend to support 128-bit keys...
*/
if (keylen > block_size && salg->mode->id != CRYPTO_MODE_MAC_CMAC) {
pr_debug("Exceeds keylen: %u\n", keylen);
pr_debug("Req. keylen hashing %s\n",
salg->calg->cra_name);
memset(hash_alg, 0x00, CRYPTO_MAX_ALG_NAME);
switch (salg->mode->id) {
case CRYPTO_MODE_HMAC_SHA224:
rc = do_shash("sha224", tctx->ipad, key, keylen,
NULL, 0, NULL, 0);
break;
case CRYPTO_MODE_HMAC_SHA256:
rc = do_shash("sha256", tctx->ipad, key, keylen,
NULL, 0, NULL, 0);
break;
case CRYPTO_MODE_HMAC_SHA384:
rc = do_shash("sha384", tctx->ipad, key, keylen,
NULL, 0, NULL, 0);
break;
case CRYPTO_MODE_HMAC_SHA512:
rc = do_shash("sha512", tctx->ipad, key, keylen,
NULL, 0, NULL, 0);
break;
case CRYPTO_MODE_HMAC_MD5:
rc = do_shash("md5", tctx->ipad, key, keylen,
NULL, 0, NULL, 0);
break;
case CRYPTO_MODE_HMAC_SHA1:
rc = do_shash("sha1", tctx->ipad, key, keylen,
NULL, 0, NULL, 0);
break;
default:
return -EINVAL;
}
if (rc < 0) {
pr_err("ERR: %d computing shash for %s\n",
rc, hash_alg);
return -EIO;
}
keylen = digest_size;
pr_debug("updated keylen: %u\n", keylen);
tctx->ctx_valid = false;
if (salg->mode->sw_fb) {
rc = crypto_ahash_setkey(tctx->fb.hash,
tctx->ipad, keylen);
if (rc < 0)
return rc;
}
} else {
memcpy(tctx->ipad, key, keylen);
tctx->ctx_valid = false;
if (salg->mode->sw_fb) {
rc = crypto_ahash_setkey(tctx->fb.hash, key, keylen);
if (rc < 0)
return rc;
}
}
/* close handle since key size may have changed */
if (tctx->handle >= 0) {
spacc_close(&priv->spacc, tctx->handle);
put_device(tctx->dev);
tctx->handle = -1;
tctx->dev = NULL;
}
priv = dev_get_drvdata(salg->dev[0]);
tctx->dev = get_device(salg->dev[0]);
if (spacc_isenabled(&priv->spacc, salg->mode->id, keylen)) {
tctx->handle = spacc_open(&priv->spacc,
CRYPTO_MODE_NULL,
salg->mode->id, -1,
0, spacc_digest_cb, tfm);
} else
pr_debug(" Keylen: %d not enabled for algo: %d",
keylen, salg->mode->id);
if (tctx->handle < 0) {
pr_err("ERR: Failed to open SPAcc context\n");
put_device(salg->dev[0]);
return -EIO;
}
rc = spacc_set_operation(&priv->spacc, tctx->handle, OP_ENCRYPT,
ICV_HASH, IP_ICV_OFFSET, 0, 0, 0);
if (rc < 0) {
spacc_close(&priv->spacc, tctx->handle);
tctx->handle = -1;
put_device(tctx->dev);
return -EIO;
}
if (salg->mode->id == CRYPTO_MODE_MAC_XCBC ||
salg->mode->id == CRYPTO_MODE_MAC_SM4_XCBC) {
rc = spacc_compute_xcbc_key(&priv->spacc, salg->mode->id,
tctx->handle, tctx->ipad,
keylen, tctx->ipad);
if (rc < 0) {
dev_warn(tctx->dev,
"Failed to compute XCBC key: %d\n", rc);
return -EIO;
}
rc = spacc_write_context(&priv->spacc, tctx->handle,
SPACC_HASH_OPERATION, tctx->ipad,
32 + keylen, NULL, 0);
} else {
rc = spacc_write_context(&priv->spacc, tctx->handle,
SPACC_HASH_OPERATION, tctx->ipad,
keylen, NULL, 0);
}
memset(tctx->ipad, 0, sizeof(tctx->ipad));
if (rc < 0) {
pr_err("ERR: Failed to write SPAcc context\n");
/* Non-fatal; we continue with the software fallback. */
return 0;
}
tctx->ctx_valid = true;
return 0;
}
static int spacc_set_statesize(struct spacc_alg *salg)
{
unsigned int statesize = 0;
switch (salg->mode->id) {
case CRYPTO_MODE_HMAC_SHA1:
case CRYPTO_MODE_HASH_SHA1:
statesize = sizeof(struct sha1_state);
break;
case CRYPTO_MODE_MAC_CMAC:
case CRYPTO_MODE_MAC_XCBC:
statesize = sizeof(struct crypto_aes_ctx);
break;
case CRYPTO_MODE_MAC_SM4_CMAC:
case CRYPTO_MODE_MAC_SM4_XCBC:
statesize = sizeof(struct sm4_ctx);
break;
case CRYPTO_MODE_HMAC_MD5:
case CRYPTO_MODE_HASH_MD5:
statesize = sizeof(struct md5_state);
break;
case CRYPTO_MODE_HASH_SHA224:
case CRYPTO_MODE_HASH_SHA256:
case CRYPTO_MODE_HMAC_SHA224:
case CRYPTO_MODE_HMAC_SHA256:
statesize = sizeof(struct sha256_state);
break;
case CRYPTO_MODE_HMAC_SHA512:
case CRYPTO_MODE_HASH_SHA512:
statesize = sizeof(struct sha512_state);
break;
case CRYPTO_MODE_HMAC_SHA384:
case CRYPTO_MODE_HASH_SHA384:
statesize = sizeof(struct spacc_crypto_reqctx);
break;
case CRYPTO_MODE_HASH_SHA3_224:
case CRYPTO_MODE_HASH_SHA3_256:
case CRYPTO_MODE_HASH_SHA3_384:
case CRYPTO_MODE_HASH_SHA3_512:
statesize = sizeof(struct sha3_state);
break;
case CRYPTO_MODE_HMAC_SM3:
case CRYPTO_MODE_MAC_MICHAEL:
statesize = sizeof(struct spacc_crypto_reqctx);
break;
default:
break;
}
return statesize;
}
static int spacc_hash_cra_init(struct crypto_tfm *tfm)
{
const struct spacc_alg *salg = spacc_tfm_ahash(tfm);
struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm);
struct spacc_priv *priv = NULL;
tctx->handle = -1;
tctx->ctx_valid = false;
tctx->dev = get_device(salg->dev[0]);
if (salg->mode->sw_fb) {
tctx->fb.hash = crypto_alloc_ahash(salg->calg->cra_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tctx->fb.hash)) {
if (tctx->handle >= 0)
spacc_close(&priv->spacc, tctx->handle);
put_device(tctx->dev);
return PTR_ERR(tctx->fb.hash);
}
crypto_ahash_set_statesize(__crypto_ahash_cast(tfm),
crypto_ahash_statesize(tctx->fb.hash));
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct spacc_crypto_reqctx) +
crypto_ahash_reqsize(tctx->fb.hash));
} else {
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct spacc_crypto_reqctx));
}
return 0;
}
static void spacc_hash_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
crypto_free_ahash(tctx->fb.hash);
if (tctx->handle >= 0)
spacc_close(&priv->spacc, tctx->handle);
put_device(tctx->dev);
}
static int spacc_hash_init(struct ahash_request *req)
{
int rc = 0;
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
ctx->digest_buf = NULL;
ctx->single_shot = 0;
ctx->total_nents = 0;
tctx->tmp_sgl = NULL;
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rc = crypto_ahash_init(&ctx->fb.hash_req);
return rc;
}
static int spacc_hash_update(struct ahash_request *req)
{
int rc;
int nbytes = req->nbytes;
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
if (!nbytes)
return 0;
pr_debug("%s Using SW fallback\n", __func__);
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->fb.hash_req.nbytes = req->nbytes;
ctx->fb.hash_req.src = req->src;
rc = crypto_ahash_update(&ctx->fb.hash_req);
return rc;
}
static int spacc_hash_final(struct ahash_request *req)
{
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
int rc;
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->fb.hash_req.result = req->result;
rc = crypto_ahash_final(&ctx->fb.hash_req);
return rc;
}
static int spacc_hash_digest(struct ahash_request *req)
{
int ret, final = 0;
int rc;
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
const struct spacc_alg *salg = spacc_tfm_ahash(&reqtfm->base);
/* direct single shot digest call */
ctx->single_shot = 1;
ctx->total_nents = sg_nents(req->src);
/* alloc tmp_sgl */
tctx->tmp_sgl = kmalloc(sizeof(*(tctx->tmp_sgl)) * 2, GFP_KERNEL);
if (!tctx->tmp_sgl)
return -ENOMEM;
sg_init_table(tctx->tmp_sgl, 2);
tctx->tmp_sgl[0].length = 0;
if (tctx->handle < 0 || !tctx->ctx_valid) {
pr_debug("%s: open SPAcc context\n", __func__);
priv = dev_get_drvdata(salg->dev[0]);
tctx->dev = get_device(salg->dev[0]);
ret = spacc_isenabled(&priv->spacc, salg->mode->id, 0);
if (ret)
tctx->handle = spacc_open(&priv->spacc,
CRYPTO_MODE_NULL,
salg->mode->id, -1, 0,
spacc_digest_cb,
reqtfm);
if (tctx->handle < 0) {
put_device(salg->dev[0]);
pr_debug("Failed to open SPAcc context\n");
goto fallback;
}
rc = spacc_set_operation(&priv->spacc, tctx->handle,
OP_ENCRYPT, ICV_HASH, IP_ICV_OFFSET,
0, 0, 0);
if (rc < 0) {
spacc_close(&priv->spacc, tctx->handle);
pr_debug("Failed to open SPAcc context\n");
tctx->handle = -1;
put_device(tctx->dev);
goto fallback;
}
tctx->ctx_valid = true;
}
rc = spacc_hash_init_dma(tctx->dev, req, final);
if (rc < 0)
goto fallback;
if (rc == 0)
return 0;
rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->acb.new_handle,
&ctx->src, &ctx->dst, req->nbytes,
0, req->nbytes, 0, 0, 0);
if (rc < 0) {
spacc_hash_cleanup_dma(tctx->dev, req);
spacc_close(&priv->spacc, ctx->acb.new_handle);
if (rc != -EBUSY) {
pr_debug("Failed to enqueue job, ERR: %d\n", rc);
return rc;
}
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
return -EBUSY;
goto fallback;
}
return -EINPROGRESS;
fallback:
/* Start from scratch as init is not called before digest */
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->fb.hash_req.nbytes = req->nbytes;
ctx->fb.hash_req.src = req->src;
ctx->fb.hash_req.result = req->result;
return crypto_ahash_digest(&ctx->fb.hash_req);
}
static int spacc_hash_finup(struct ahash_request *req)
{
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
int rc;
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
ctx->fb.hash_req.nbytes = req->nbytes;
ctx->fb.hash_req.src = req->src;
ctx->fb.hash_req.result = req->result;
rc = crypto_ahash_finup(&ctx->fb.hash_req);
return rc;
}
static int spacc_hash_import(struct ahash_request *req, const void *in)
{
int rc;
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rc = crypto_ahash_import(&ctx->fb.hash_req, in);
return rc;
}
static int spacc_hash_export(struct ahash_request *req, void *out)
{
int rc;
struct crypto_ahash *reqtfm = crypto_ahash_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_ahash_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = ahash_request_ctx(req);
ahash_request_set_tfm(&ctx->fb.hash_req, tctx->fb.hash);
ctx->fb.hash_req.base.flags = req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rc = crypto_ahash_export(&ctx->fb.hash_req, out);
return rc;
}
static const struct ahash_alg spacc_hash_template = {
.init = spacc_hash_init,
.update = spacc_hash_update,
.final = spacc_hash_final,
.finup = spacc_hash_finup,
.digest = spacc_hash_digest,
.setkey = spacc_hash_setkey,
.export = spacc_hash_export,
.import = spacc_hash_import,
.halg.base = {
.cra_priority = 300,
.cra_module = THIS_MODULE,
.cra_init = spacc_hash_cra_init,
.cra_exit = spacc_hash_cra_exit,
.cra_ctxsize = sizeof(struct spacc_crypto_ctx),
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_OPTIONAL_KEY
},
};
static int spacc_register_hash(struct spacc_alg *salg)
{
int rc;
salg->calg = &salg->alg.hash.halg.base;
salg->alg.hash = spacc_hash_template;
spacc_init_calg(salg->calg, salg->mode);
salg->alg.hash.halg.digestsize = salg->mode->hashlen;
salg->alg.hash.halg.statesize = spacc_set_statesize(salg);
rc = crypto_register_ahash(&salg->alg.hash);
if (rc < 0)
return rc;
mutex_lock(&spacc_hash_alg_mutex);
list_add(&salg->list, &spacc_hash_alg_list);
mutex_unlock(&spacc_hash_alg_mutex);
return 0;
}
int probe_hashes(struct platform_device *spacc_pdev)
{
int rc;
unsigned int i;
int registered = 0;
struct spacc_alg *salg;
struct spacc_priv *priv = dev_get_drvdata(&spacc_pdev->dev);
spacc_hash_pool = dma_pool_create("spacc-digest", &spacc_pdev->dev,
SPACC_MAX_DIGEST_SIZE,
SPACC_DMA_ALIGN, SPACC_DMA_BOUNDARY);
if (!spacc_hash_pool)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(possible_hashes); i++)
possible_hashes[i].valid = 0;
for (i = 0; i < ARRAY_SIZE(possible_hashes); i++) {
if (possible_hashes[i].valid == 0 &&
spacc_isenabled(&priv->spacc,
possible_hashes[i].id & 0xFF,
possible_hashes[i].hashlen)) {
salg = kmalloc(sizeof(*salg), GFP_KERNEL);
if (!salg)
return -ENOMEM;
salg->mode = &possible_hashes[i];
/* Copy all dev's over to the salg */
salg->dev[0] = &spacc_pdev->dev;
salg->dev[1] = NULL;
rc = spacc_register_hash(salg);
if (rc < 0) {
kfree(salg);
continue;
}
pr_debug("registered %s\n",
possible_hashes[i].name);
registered++;
possible_hashes[i].valid = 1;
}
}
return registered;
}
int spacc_unregister_hash_algs(void)
{
struct spacc_alg *salg, *tmp;
mutex_lock(&spacc_hash_alg_mutex);
list_for_each_entry_safe(salg, tmp, &spacc_hash_alg_list, list) {
crypto_unregister_alg(salg->calg);
list_del(&salg->list);
kfree(salg);
}
mutex_unlock(&spacc_hash_alg_mutex);
dma_pool_destroy(spacc_hash_pool);
return 0;
}
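Once this file registers an algorithm such as "spacc-sha256", a kernel user reaches it through the standard ahash API. A minimal sketch of such a caller (the function name is illustrative, error paths are trimmed, and this is not part of the driver):

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int spacc_ahash_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	/* cra_driver_name is "spacc-" + cra_name, per spacc_init_calg() */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* drives spacc_hash_digest() above; may complete asynchronously */
	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}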

(file diff suppressed because it is too large)

drivers/crypto/dwc-spacc/spacc_core.h
@@ -1,824 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPACC_CORE_H_
#define SPACC_CORE_H_
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <crypto/skcipher.h>
#include "spacc_hal.h"
enum {
SPACC_DMA_UNDEF = 0,
SPACC_DMA_DDT = 1,
SPACC_DMA_LINEAR = 2
};
enum {
SPACC_OP_MODE_IRQ = 0,
SPACC_OP_MODE_WD = 1 /* watchdog */
};
#define OP_ENCRYPT 0
#define OP_DECRYPT 1
#define SPACC_CRYPTO_OPERATION 1
#define SPACC_HASH_OPERATION 2
#define SPACC_AADCOPY_FLAG 0x80000000
#define SPACC_AUTO_SIZE (-1)
#define SPACC_WD_LIMIT 0x80
#define SPACC_WD_TIMER_INIT 0x40000
/********* Register Offsets **********/
#define SPACC_REG_IRQ_EN 0x00000L
#define SPACC_REG_IRQ_STAT 0x00004L
#define SPACC_REG_IRQ_CTRL 0x00008L
#define SPACC_REG_FIFO_STAT 0x0000CL
#define SPACC_REG_SDMA_BRST_SZ 0x00010L
#define SPACC_REG_SRC_PTR 0x00020L
#define SPACC_REG_DST_PTR 0x00024L
#define SPACC_REG_OFFSET 0x00028L
#define SPACC_REG_PRE_AAD_LEN 0x0002CL
#define SPACC_REG_POST_AAD_LEN 0x00030L
#define SPACC_REG_PROC_LEN 0x00034L
#define SPACC_REG_ICV_LEN 0x00038L
#define SPACC_REG_ICV_OFFSET 0x0003CL
#define SPACC_REG_IV_OFFSET 0x00040L
#define SPACC_REG_SW_CTRL 0x00044L
#define SPACC_REG_AUX_INFO 0x00048L
#define SPACC_REG_CTRL 0x0004CL
#define SPACC_REG_STAT_POP 0x00050L
#define SPACC_REG_STATUS 0x00054L
#define SPACC_REG_STAT_WD_CTRL 0x00080L
#define SPACC_REG_KEY_SZ 0x00100L
#define SPACC_REG_VIRTUAL_RQST 0x00140L
#define SPACC_REG_VIRTUAL_ALLOC 0x00144L
#define SPACC_REG_VIRTUAL_PRIO 0x00148L
#define SPACC_REG_ID 0x00180L
#define SPACC_REG_CONFIG 0x00184L
#define SPACC_REG_CONFIG2 0x00190L
#define SPACC_REG_SECURE_CTRL 0x001C0L
#define SPACC_REG_SECURE_RELEASE 0x001C4
#define SPACC_REG_SK_LOAD 0x00200L
#define SPACC_REG_SK_STAT 0x00204L
#define SPACC_REG_SK_KEY 0x00240L
#define SPACC_REG_VERSION_EXT_3 0x00194L
/* out 8MB from base of SPACC */
#define SPACC_REG_SKP 0x800000UL
/********** Context Offsets **********/
#define SPACC_CTX_CIPH_KEY 0x04000L
#define SPACC_CTX_HASH_KEY 0x08000L
/******** Sub-Context Offsets ********/
#define SPACC_CTX_AES_KEY 0x00
#define SPACC_CTX_AES_IV 0x20
#define SPACC_CTX_DES_KEY 0x08
#define SPACC_CTX_DES_IV 0x00
/* use these to loop over CMDX macros */
#define SPACC_CMDX_MAX 1
#define SPACC_CMDX_MAX_QOS 3
/********** IRQ_EN Bit Masks **********/
#define _SPACC_IRQ_CMD0 0
#define _SPACC_IRQ_STAT 4
#define _SPACC_IRQ_STAT_WD 12
#define _SPACC_IRQ_GLBL 31
#define SPACC_IRQ_EN_CMD(x) (1UL << _SPACC_IRQ_CMD0 << (x))
#define SPACC_IRQ_EN_STAT BIT(_SPACC_IRQ_STAT)
#define SPACC_IRQ_EN_STAT_WD BIT(_SPACC_IRQ_STAT_WD)
#define SPACC_IRQ_EN_GLBL BIT(_SPACC_IRQ_GLBL)
/********* IRQ_STAT Bitmasks *********/
#define SPACC_IRQ_STAT_CMDX(x) (1UL << _SPACC_IRQ_CMD0 << (x))
#define SPACC_IRQ_STAT_STAT BIT(_SPACC_IRQ_STAT)
#define SPACC_IRQ_STAT_STAT_WD BIT(_SPACC_IRQ_STAT_WD)
#define SPACC_IRQ_STAT_CLEAR_STAT(spacc) writel(SPACC_IRQ_STAT_STAT, \
(spacc)->regmap + SPACC_REG_IRQ_STAT)
#define SPACC_IRQ_STAT_CLEAR_STAT_WD(spacc) writel(SPACC_IRQ_STAT_STAT_WD, \
(spacc)->regmap + SPACC_REG_IRQ_STAT)
#define SPACC_IRQ_STAT_CLEAR_CMDX(spacc, x) writel(SPACC_IRQ_STAT_CMDX(x), \
(spacc)->regmap + SPACC_REG_IRQ_STAT)
/********* IRQ_CTRL Bitmasks *********/
/* CMD0 = 0; for QOS, CMD1 = 8, CMD2 = 16 */
#define _SPACC_IRQ_CTRL_CMDX_CNT(x) (8 * (x))
#define SPACC_IRQ_CTRL_CMDX_CNT_SET(x, n) \
(((n) & 0xFF) << _SPACC_IRQ_CTRL_CMDX_CNT(x))
#define SPACC_IRQ_CTRL_CMDX_CNT_MASK(x) \
(0xFF << _SPACC_IRQ_CTRL_CMDX_CNT(x))
/* STAT_CNT is at 16 and for QOS at 24 */
#define _SPACC_IRQ_CTRL_STAT_CNT 16
#define SPACC_IRQ_CTRL_STAT_CNT_SET(n) ((n) << _SPACC_IRQ_CTRL_STAT_CNT)
#define SPACC_IRQ_CTRL_STAT_CNT_MASK (0x1FF << _SPACC_IRQ_CTRL_STAT_CNT)
#define _SPACC_IRQ_CTRL_STAT_CNT_QOS 24
#define SPACC_IRQ_CTRL_STAT_CNT_SET_QOS(n) \
((n) << _SPACC_IRQ_CTRL_STAT_CNT_QOS)
#define SPACC_IRQ_CTRL_STAT_CNT_MASK_QOS \
(0x7F << _SPACC_IRQ_CTRL_STAT_CNT_QOS)
/******** FIFO_STAT Bitmasks *********/
/* SPACC with QOS */
#define SPACC_FIFO_STAT_CMDX_CNT_MASK(x) \
(0x7F << ((x) * 8))
#define SPACC_FIFO_STAT_CMDX_CNT_GET(x, y) \
(((y) & SPACC_FIFO_STAT_CMDX_CNT_MASK(x)) >> ((x) * 8))
#define SPACC_FIFO_STAT_CMDX_FULL(x) (1UL << (7 + (x) * 8))
#define _SPACC_FIFO_STAT_STAT_CNT_QOS 24
#define SPACC_FIFO_STAT_STAT_CNT_MASK_QOS \
(0x7F << _SPACC_FIFO_STAT_STAT_CNT_QOS)
#define SPACC_FIFO_STAT_STAT_CNT_GET_QOS(y) \
(((y) & \
SPACC_FIFO_STAT_STAT_CNT_MASK_QOS) >> _SPACC_FIFO_STAT_STAT_CNT_QOS)
/* SPACC without QOS */
#define SPACC_FIFO_STAT_CMD0_CNT_MASK (0x1FF)
#define SPACC_FIFO_STAT_CMD0_CNT_GET(y) ((y) & SPACC_FIFO_STAT_CMD0_CNT_MASK)
#define _SPACC_FIFO_STAT_CMD0_FULL 15
#define SPACC_FIFO_STAT_CMD0_FULL BIT(_SPACC_FIFO_STAT_CMD0_FULL)
#define _SPACC_FIFO_STAT_STAT_CNT 16
#define SPACC_FIFO_STAT_STAT_CNT_MASK (0x1FF << _SPACC_FIFO_STAT_STAT_CNT)
#define SPACC_FIFO_STAT_STAT_CNT_GET(y) \
(((y) & SPACC_FIFO_STAT_STAT_CNT_MASK) >> _SPACC_FIFO_STAT_STAT_CNT)
/* both */
#define _SPACC_FIFO_STAT_STAT_EMPTY 31
#define SPACC_FIFO_STAT_STAT_EMPTY BIT(_SPACC_FIFO_STAT_STAT_EMPTY)
/********* SRC/DST_PTR Bitmasks **********/
#define SPACC_SRC_PTR_PTR 0xFFFFFFF8
#define SPACC_DST_PTR_PTR 0xFFFFFFF8
/********** OFFSET Bitmasks **********/
#define SPACC_OFFSET_SRC_O 0
#define SPACC_OFFSET_SRC_W 16
#define SPACC_OFFSET_DST_O 16
#define SPACC_OFFSET_DST_W 16
#define SPACC_MIN_CHUNK_SIZE 1024
#define SPACC_MAX_CHUNK_SIZE 16384
/********* PKT_LEN Bitmasks **********/
#ifndef _SPACC_PKT_LEN_PROC_LEN
#define _SPACC_PKT_LEN_PROC_LEN 0
#endif
#ifndef _SPACC_PKT_LEN_AAD_LEN
#define _SPACC_PKT_LEN_AAD_LEN 16
#endif
/********* SW_CTRL Bitmasks ***********/
#define _SPACC_SW_CTRL_ID_0 0
#define SPACC_SW_CTRL_ID_W 8
#define SPACC_SW_CTRL_ID_MASK (0xFF << _SPACC_SW_CTRL_ID_0)
#define SPACC_SW_CTRL_ID_GET(y) \
(((y) & SPACC_SW_CTRL_ID_MASK) >> _SPACC_SW_CTRL_ID_0)
#define SPACC_SW_CTRL_ID_SET(id) \
(((id) & SPACC_SW_CTRL_ID_MASK) >> _SPACC_SW_CTRL_ID_0)
#define _SPACC_SW_CTRL_PRIO 30
#define SPACC_SW_CTRL_PRIO_MASK 0x3
#define SPACC_SW_CTRL_PRIO_SET(prio) \
(((prio) & SPACC_SW_CTRL_PRIO_MASK) << _SPACC_SW_CTRL_PRIO)
/* Priorities */
#define SPACC_SW_CTRL_PRIO_HI 0
#define SPACC_SW_CTRL_PRIO_MED 1
#define SPACC_SW_CTRL_PRIO_LOW 2
/*********** SECURE_CTRL bitmasks *********/
#define _SPACC_SECURE_CTRL_MS_SRC 0
#define _SPACC_SECURE_CTRL_MS_DST 1
#define _SPACC_SECURE_CTRL_MS_DDT 2
#define _SPACC_SECURE_CTRL_LOCK 31
#define SPACC_SECURE_CTRL_MS_SRC BIT(_SPACC_SECURE_CTRL_MS_SRC)
#define SPACC_SECURE_CTRL_MS_DST BIT(_SPACC_SECURE_CTRL_MS_DST)
#define SPACC_SECURE_CTRL_MS_DDT BIT(_SPACC_SECURE_CTRL_MS_DDT)
#define SPACC_SECURE_CTRL_LOCK BIT(_SPACC_SECURE_CTRL_LOCK)
/********* SKP bits **************/
#define _SPACC_SK_LOAD_CTX_IDX 0
#define _SPACC_SK_LOAD_ALG 8
#define _SPACC_SK_LOAD_MODE 12
#define _SPACC_SK_LOAD_SIZE 16
#define _SPACC_SK_LOAD_ENC_EN 30
#define _SPACC_SK_LOAD_DEC_EN 31
#define _SPACC_SK_STAT_BUSY 0
#define SPACC_SK_LOAD_ENC_EN BIT(_SPACC_SK_LOAD_ENC_EN)
#define SPACC_SK_LOAD_DEC_EN BIT(_SPACC_SK_LOAD_DEC_EN)
#define SPACC_SK_STAT_BUSY BIT(_SPACC_SK_STAT_BUSY)
/*********** CTRL Bitmasks ***********/
/* These CTRL field locations vary with SPACC version
* and if they are used, they should be set accordingly
*/
#define _SPACC_CTRL_CIPH_ALG 0
#define _SPACC_CTRL_HASH_ALG 4
#define _SPACC_CTRL_CIPH_MODE 8
#define _SPACC_CTRL_HASH_MODE 12
#define _SPACC_CTRL_MSG_BEGIN 14
#define _SPACC_CTRL_MSG_END 15
#define _SPACC_CTRL_CTX_IDX 16
#define _SPACC_CTRL_ENCRYPT 24
#define _SPACC_CTRL_AAD_COPY 25
#define _SPACC_CTRL_ICV_PT 26
#define _SPACC_CTRL_ICV_ENC 27
#define _SPACC_CTRL_ICV_APPEND 28
#define _SPACC_CTRL_KEY_EXP 29
#define _SPACC_CTRL_SEC_KEY 31
/* CTRL bitmasks for 4.15+ cores */
#define _SPACC_CTRL_CIPH_ALG_415 0
#define _SPACC_CTRL_HASH_ALG_415 3
#define _SPACC_CTRL_CIPH_MODE_415 8
#define _SPACC_CTRL_HASH_MODE_415 12
/********* Virtual Spacc Priority Bitmasks **********/
#define _SPACC_VPRIO_MODE 0
#define _SPACC_VPRIO_WEIGHT 8
/********* AUX INFO Bitmasks *********/
#define _SPACC_AUX_INFO_DIR 0
#define _SPACC_AUX_INFO_BIT_ALIGN 1
#define _SPACC_AUX_INFO_CBC_CS 16
/********* STAT_POP Bitmasks *********/
#define _SPACC_STAT_POP_POP 0
#define SPACC_STAT_POP_POP BIT(_SPACC_STAT_POP_POP)
/********** STATUS Bitmasks **********/
#define _SPACC_STATUS_SW_ID 0
#define _SPACC_STATUS_RET_CODE 24
#define _SPACC_STATUS_SEC_CMD 31
#define SPACC_GET_STATUS_RET_CODE(s) \
(((s) >> _SPACC_STATUS_RET_CODE) & 0x7)
#define SPACC_STATUS_SW_ID_MASK (0xFF << _SPACC_STATUS_SW_ID)
#define SPACC_STATUS_SW_ID_GET(y) \
(((y) & SPACC_STATUS_SW_ID_MASK) >> _SPACC_STATUS_SW_ID)
/********** KEY_SZ Bitmasks **********/
#define _SPACC_KEY_SZ_SIZE 0
#define _SPACC_KEY_SZ_CTX_IDX 8
#define _SPACC_KEY_SZ_CIPHER 31
#define SPACC_KEY_SZ_CIPHER BIT(_SPACC_KEY_SZ_CIPHER)
#define SPACC_SET_CIPHER_KEY_SZ(z) \
(((z) << _SPACC_KEY_SZ_SIZE) | (1UL << _SPACC_KEY_SZ_CIPHER))
#define SPACC_SET_HASH_KEY_SZ(z) ((z) << _SPACC_KEY_SZ_SIZE)
#define SPACC_SET_KEY_CTX(ctx) ((ctx) << _SPACC_KEY_SZ_CTX_IDX)
/*****************************************************************************/
#define AUX_DIR(a) ((a) << _SPACC_AUX_INFO_DIR)
#define AUX_BIT_ALIGN(a) ((a) << _SPACC_AUX_INFO_BIT_ALIGN)
#define AUX_CBC_CS(a) ((a) << _SPACC_AUX_INFO_CBC_CS)
#define VPRIO_SET(mode, weight) \
(((mode) << _SPACC_VPRIO_MODE) | ((weight) << _SPACC_VPRIO_WEIGHT))
#ifndef MAX_DDT_ENTRIES
/* add one for null at end of list */
#define MAX_DDT_ENTRIES \
((SPACC_MAX_MSG_MALLOC_SIZE / SPACC_MAX_PARTICLE_SIZE) + 1)
#endif
#define DDT_ENTRY_SIZE (sizeof(ddt_entry) * MAX_DDT_ENTRIES)
#ifndef SPACC_MAX_JOBS
#define SPACC_MAX_JOBS BIT(SPACC_SW_CTRL_ID_W)
#endif
#if SPACC_MAX_JOBS > 256
# error SPACC_MAX_JOBS cannot exceed 256.
#endif
#ifndef SPACC_MAX_JOB_BUFFERS
#define SPACC_MAX_JOB_BUFFERS 192
#endif
/* max DDT particle size */
#ifndef SPACC_MAX_PARTICLE_SIZE
#define SPACC_MAX_PARTICLE_SIZE 4096
#endif
/* max message size from HW configuration */
/* usually defined in the ICD as (2^16) - 1 */
#ifndef _SPACC_MAX_MSG_MALLOC_SIZE
#define _SPACC_MAX_MSG_MALLOC_SIZE 16
#endif
#define SPACC_MAX_MSG_MALLOC_SIZE BIT(_SPACC_MAX_MSG_MALLOC_SIZE)
#ifndef SPACC_MAX_MSG_SIZE
#define SPACC_MAX_MSG_SIZE (SPACC_MAX_MSG_MALLOC_SIZE - 1)
#endif
#define SPACC_LOOP_WAIT 1000000
#define SPACC_CTR_IV_MAX8 ((u32)0xFF)
#define SPACC_CTR_IV_MAX16 ((u32)0xFFFF)
#define SPACC_CTR_IV_MAX32 ((u32)0xFFFFFFFF)
#define SPACC_CTR_IV_MAX64 ((u64)0xFFFFFFFFFFFFFFFF)
/* cipher algos */
enum ecipher {
C_NULL = 0,
C_DES = 1,
C_AES = 2,
C_RC4 = 3,
C_MULTI2 = 4,
C_KASUMI = 5,
C_SNOW3G_UEA2 = 6,
C_ZUC_UEA3 = 7,
C_CHACHA20 = 8,
C_SM4 = 9,
C_MAX = 10
};
/* ctrl reg cipher modes */
enum eciphermode {
CM_ECB = 0,
CM_CBC = 1,
CM_CTR = 2,
CM_CCM = 3,
CM_GCM = 5,
CM_OFB = 7,
CM_CFB = 8,
CM_F8 = 9,
CM_XTS = 10,
CM_MAX = 11
};
enum echachaciphermode {
CM_CHACHA_STREAM = 2,
CM_CHACHA_AEAD = 5
};
enum ehash {
H_NULL = 0,
H_MD5 = 1,
H_SHA1 = 2,
H_SHA224 = 3,
H_SHA256 = 4,
H_SHA384 = 5,
H_SHA512 = 6,
H_XCBC = 7,
H_CMAC = 8,
H_KF9 = 9,
H_SNOW3G_UIA2 = 10,
H_CRC32_I3E802_3 = 11,
H_ZUC_UIA3 = 12,
H_SHA512_224 = 13,
H_SHA512_256 = 14,
H_MICHAEL = 15,
H_SHA3_224 = 16,
H_SHA3_256 = 17,
H_SHA3_384 = 18,
H_SHA3_512 = 19,
H_SHAKE128 = 20,
H_SHAKE256 = 21,
H_POLY1305 = 22,
H_SM3 = 23,
H_SM4_XCBC_MAC = 24,
H_SM4_CMAC = 25,
H_MAX = 26
};
enum ehashmode {
HM_RAW = 0,
HM_SSLMAC = 1,
HM_HMAC = 2,
HM_MAX = 3
};
enum eshakehashmode {
HM_SHAKE_SHAKE = 0,
HM_SHAKE_CSHAKE = 1,
HM_SHAKE_KMAC = 2
};
enum spacc_ret_code {
SPACC_OK = 0,
SPACC_ICVFAIL = 1,
SPACC_MEMERR = 2,
SPACC_BLOCKERR = 3,
SPACC_SECERR = 4
};
enum eicvpos {
IP_ICV_OFFSET = 0,
IP_ICV_APPEND = 1,
IP_ICV_IGNORE = 2,
IP_MAX = 3
};
enum {
/* HASH of plaintext */
ICV_HASH = 0,
/* HASH the plaintext and encrypt the plaintext and ICV */
ICV_HASH_ENCRYPT = 1,
/* HASH the ciphertext */
ICV_ENCRYPT_HASH = 2,
ICV_IGNORE = 3,
IM_MAX = 4
};
enum {
NO_PARTIAL_PCK = 0,
FIRST_PARTIAL_PCK = 1,
MIDDLE_PARTIAL_PCK = 2,
LAST_PARTIAL_PCK = 3
};
enum crypto_modes {
CRYPTO_MODE_NULL,
CRYPTO_MODE_AES_ECB,
CRYPTO_MODE_AES_CBC,
CRYPTO_MODE_AES_CTR,
CRYPTO_MODE_AES_CCM,
CRYPTO_MODE_AES_GCM,
CRYPTO_MODE_AES_F8,
CRYPTO_MODE_AES_XTS,
CRYPTO_MODE_AES_CFB,
CRYPTO_MODE_AES_OFB,
CRYPTO_MODE_AES_CS1,
CRYPTO_MODE_AES_CS2,
CRYPTO_MODE_AES_CS3,
CRYPTO_MODE_MULTI2_ECB,
CRYPTO_MODE_MULTI2_CBC,
CRYPTO_MODE_MULTI2_OFB,
CRYPTO_MODE_MULTI2_CFB,
CRYPTO_MODE_3DES_CBC,
CRYPTO_MODE_3DES_ECB,
CRYPTO_MODE_DES_CBC,
CRYPTO_MODE_DES_ECB,
CRYPTO_MODE_KASUMI_ECB,
CRYPTO_MODE_KASUMI_F8,
CRYPTO_MODE_SNOW3G_UEA2,
CRYPTO_MODE_ZUC_UEA3,
CRYPTO_MODE_CHACHA20_STREAM,
CRYPTO_MODE_CHACHA20_POLY1305,
CRYPTO_MODE_SM4_ECB,
CRYPTO_MODE_SM4_CBC,
CRYPTO_MODE_SM4_CFB,
CRYPTO_MODE_SM4_OFB,
CRYPTO_MODE_SM4_CTR,
CRYPTO_MODE_SM4_CCM,
CRYPTO_MODE_SM4_GCM,
CRYPTO_MODE_SM4_F8,
CRYPTO_MODE_SM4_XTS,
CRYPTO_MODE_SM4_CS1,
CRYPTO_MODE_SM4_CS2,
CRYPTO_MODE_SM4_CS3,
CRYPTO_MODE_HASH_MD5,
CRYPTO_MODE_HMAC_MD5,
CRYPTO_MODE_HASH_SHA1,
CRYPTO_MODE_HMAC_SHA1,
CRYPTO_MODE_HASH_SHA224,
CRYPTO_MODE_HMAC_SHA224,
CRYPTO_MODE_HASH_SHA256,
CRYPTO_MODE_HMAC_SHA256,
CRYPTO_MODE_HASH_SHA384,
CRYPTO_MODE_HMAC_SHA384,
CRYPTO_MODE_HASH_SHA512,
CRYPTO_MODE_HMAC_SHA512,
CRYPTO_MODE_HASH_SHA512_224,
CRYPTO_MODE_HMAC_SHA512_224,
CRYPTO_MODE_HASH_SHA512_256,
CRYPTO_MODE_HMAC_SHA512_256,
CRYPTO_MODE_MAC_XCBC,
CRYPTO_MODE_MAC_CMAC,
CRYPTO_MODE_MAC_KASUMI_F9,
CRYPTO_MODE_MAC_SNOW3G_UIA2,
CRYPTO_MODE_MAC_ZUC_UIA3,
CRYPTO_MODE_MAC_POLY1305,
CRYPTO_MODE_SSLMAC_MD5,
CRYPTO_MODE_SSLMAC_SHA1,
CRYPTO_MODE_HASH_CRC32,
CRYPTO_MODE_MAC_MICHAEL,
CRYPTO_MODE_HASH_SHA3_224,
CRYPTO_MODE_HASH_SHA3_256,
CRYPTO_MODE_HASH_SHA3_384,
CRYPTO_MODE_HASH_SHA3_512,
CRYPTO_MODE_HASH_SHAKE128,
CRYPTO_MODE_HASH_SHAKE256,
CRYPTO_MODE_HASH_CSHAKE128,
CRYPTO_MODE_HASH_CSHAKE256,
CRYPTO_MODE_MAC_KMAC128,
CRYPTO_MODE_MAC_KMAC256,
CRYPTO_MODE_MAC_KMACXOF128,
CRYPTO_MODE_MAC_KMACXOF256,
CRYPTO_MODE_HASH_SM3,
CRYPTO_MODE_HMAC_SM3,
CRYPTO_MODE_MAC_SM4_XCBC,
CRYPTO_MODE_MAC_SM4_CMAC,
CRYPTO_MODE_LAST
};
/* job descriptor */
typedef void (*spacc_callback)(void *spacc_dev, void *data);
struct spacc_job {
unsigned long
enc_mode, /* Encryption Algorithm mode */
hash_mode, /* HASH Algorithm mode */
icv_len,
icv_offset,
op, /* Operation */
ctrl, /* CTRL shadow register */
/* context just initialized or taken,
* and this is the first use.
*/
first_use,
pre_aad_sz, post_aad_sz, /* size of AAD for the latest packet*/
hkey_sz,
ckey_sz;
/* Direction and bit alignment parameters for the AUX_INFO reg */
unsigned int auxinfo_dir, auxinfo_bit_align;
unsigned int auxinfo_cs_mode; /* AUX info setting for CBC-CS */
u32 ctx_idx;
unsigned int job_used, job_swid, job_done, job_err, job_secure;
spacc_callback cb;
void *cbdata;
};
#define SPACC_CTX_IDX_UNUSED 0xFFFFFFFF
#define SPACC_JOB_IDX_UNUSED 0xFFFFFFFF
struct spacc_ctx {
/* Memory context to store cipher keys*/
void __iomem *ciph_key;
/* Memory context to store hash keys*/
void __iomem *hash_key;
/* reference count of jobs using this context */
int ref_cnt;
/* number of contexts following related to this one */
int ncontig;
};
#define SPACC_CTRL_MASK(field) \
(1UL << spacc->config.ctrl_map[(field)])
#define SPACC_CTRL_SET(field, value) \
((value) << spacc->config.ctrl_map[(field)])
enum {
SPACC_CTRL_VER_0,
SPACC_CTRL_VER_1,
SPACC_CTRL_VER_2,
SPACC_CTRL_VER_SIZE
};
enum {
SPACC_CTRL_CIPH_ALG,
SPACC_CTRL_CIPH_MODE,
SPACC_CTRL_HASH_ALG,
SPACC_CTRL_HASH_MODE,
SPACC_CTRL_ENCRYPT,
SPACC_CTRL_CTX_IDX,
SPACC_CTRL_SEC_KEY,
SPACC_CTRL_AAD_COPY,
SPACC_CTRL_ICV_PT,
SPACC_CTRL_ICV_ENC,
SPACC_CTRL_ICV_APPEND,
SPACC_CTRL_KEY_EXP,
SPACC_CTRL_MSG_BEGIN,
SPACC_CTRL_MSG_END,
SPACC_CTRL_MAPSIZE
};
struct spacc_device {
void __iomem *regmap;
int zero_key;
/* hardware configuration */
struct {
unsigned int version,
pdu_version,
project;
uint32_t max_msg_size; /* max PROCLEN value */
unsigned char modes[CRYPTO_MODE_LAST];
int num_ctx, /* no. of contexts */
num_sec_ctx, /* no. of SKP contexts*/
sec_ctx_page_size, /* page size of SKP context in bytes*/
ciph_page_size, /* cipher context page size in bytes*/
hash_page_size, /* hash context page size in bytes*/
string_size,
is_qos, /* QOS spacc? */
is_pdu, /* PDU spacc? */
is_secure,
is_secure_port, /* Are we on the secure port? */
is_partial, /* Is partial processing enabled? */
is_ivimport, /* is ivimport enabled? */
dma_type, /* DMA type: linear or scattergather */
idx, /* Which virtual spacc IDX is this? */
priority, /* Weighted priority of virtual spacc */
cmd0_fifo_depth, /* CMD FIFO depths */
cmd1_fifo_depth,
cmd2_fifo_depth,
stat_fifo_depth, /* depth of STATUS FIFO */
fifo_cnt,
ideal_stat_level,
spacc_endian;
uint32_t wd_timer;
u64 oldtimer, timer;
const u8 *ctrl_map; /* map of ctrl register field offsets */
} config;
struct spacc_job_buffer {
int active;
int job_idx;
struct pdu_ddt *src, *dst;
u32 proc_sz, aad_offset, pre_aad_sz,
post_aad_sz, iv_offset, prio;
} job_buffer[SPACC_MAX_JOB_BUFFERS];
int jb_head, jb_tail;
int op_mode, /* operating mode and watchdog functionality */
wdcnt; /* number of pending WD IRQs*/
/* SW_ID value which will be used for next job. */
unsigned int job_next_swid;
struct spacc_ctx *ctx; /* This size changes per configured device */
struct spacc_job *job; /* allocate memory for [SPACC_MAX_JOBS]; */
int job_lookup[SPACC_MAX_JOBS]; /* correlate SW_ID back to job index */
spinlock_t lock; /* lock for register access */
spinlock_t ctx_lock; /* lock for context manager */
/* callback functions for IRQ processing */
void (*irq_cb_cmdx)(struct spacc_device *spacc, int x);
void (*irq_cb_stat)(struct spacc_device *spacc);
void (*irq_cb_stat_wd)(struct spacc_device *spacc);
/* this is called after jobs have been popped off the STATUS FIFO
* useful so you can be told when there might be space available
* in the CMD FIFO
*/
void (*spacc_notify_jobs)(struct spacc_device *spacc);
/* cache*/
struct {
u32 src_ptr,
dst_ptr,
proc_len,
icv_len,
icv_offset,
pre_aad,
post_aad,
iv_offset,
offset,
aux;
} cache;
struct device *dptr;
};
enum {
SPACC_IRQ_MODE_WD = 1, /* use WD*/
SPACC_IRQ_MODE_STEP = 2 /* older use CMD/STAT stepping */
};
enum {
SPACC_IRQ_CMD_GET = 0,
SPACC_IRQ_CMD_SET = 1
};
struct spacc_priv {
struct spacc_device spacc;
struct semaphore core_running;
struct tasklet_struct pop_jobs;
spinlock_t hw_lock;
unsigned long max_msg_len;
};
int spacc_open(struct spacc_device *spacc, int enc, int hash, int ctx,
int secure_mode, spacc_callback cb, void *cbdata);
int spacc_clone_handle(struct spacc_device *spacc, int old_handle,
void *cbdata);
int spacc_close(struct spacc_device *spacc, int job_idx);
int spacc_set_operation(struct spacc_device *spacc, int job_idx, int op,
u32 prot, uint32_t icvcmd, uint32_t icvoff,
uint32_t icvsz, uint32_t sec_key);
int spacc_set_key_exp(struct spacc_device *spacc, int job_idx);
int spacc_packet_enqueue_ddt_ex(struct spacc_device *spacc, int use_jb,
int job_idx, struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt,
u32 proc_sz, uint32_t aad_offset, uint32_t pre_aad_sz,
u32 post_aad_sz, uint32_t iv_offset, uint32_t prio);
int spacc_packet_enqueue_ddt(struct spacc_device *spacc, int job_idx,
struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt,
uint32_t proc_sz, u32 aad_offset, uint32_t pre_aad_sz,
uint32_t post_aad_sz, u32 iv_offset, uint32_t prio);
/* IRQ handling functions */
void spacc_irq_cmdx_enable(struct spacc_device *spacc, int cmdx, int cmdx_cnt);
void spacc_irq_cmdx_disable(struct spacc_device *spacc, int cmdx);
void spacc_irq_stat_enable(struct spacc_device *spacc, int stat_cnt);
void spacc_irq_stat_disable(struct spacc_device *spacc);
void spacc_irq_stat_wd_enable(struct spacc_device *spacc);
void spacc_irq_stat_wd_disable(struct spacc_device *spacc);
void spacc_irq_glbl_enable(struct spacc_device *spacc);
void spacc_irq_glbl_disable(struct spacc_device *spacc);
uint32_t spacc_process_irq(struct spacc_device *spacc);
void spacc_set_wd_count(struct spacc_device *spacc, uint32_t val);
irqreturn_t spacc_irq_handler(int irq, void *dev);
int spacc_sgs_to_ddt(struct device *dev,
struct scatterlist *sg1, int len1, int *ents1,
struct scatterlist *sg2, int len2, int *ents2,
struct scatterlist *sg3, int len3, int *ents3,
struct pdu_ddt *ddt, int dma_direction);
int spacc_sg_to_ddt(struct device *dev, struct scatterlist *sg,
int nbytes, struct pdu_ddt *ddt, int dma_direction);
/* Context Manager */
void spacc_ctx_init_all(struct spacc_device *spacc);
/* SPAcc specific manipulation of context memory */
int spacc_write_context(struct spacc_device *spacc, int job_idx, int op,
const unsigned char *key, int ksz,
const unsigned char *iv, int ivsz);
int spacc_read_context(struct spacc_device *spacc, int job_idx, int op,
unsigned char *key, int ksz, unsigned char *iv,
int ivsz);
/* Job Manager */
void spacc_job_init_all(struct spacc_device *spacc);
int spacc_job_request(struct spacc_device *dev, int job_idx);
int spacc_job_release(struct spacc_device *dev, int job_idx);
int spacc_handle_release(struct spacc_device *spacc, int job_idx);
/* Helper functions */
struct spacc_ctx *context_lookup_by_job(struct spacc_device *spacc,
int job_idx);
int spacc_isenabled(struct spacc_device *spacc, int mode, int keysize);
int spacc_compute_xcbc_key(struct spacc_device *spacc, int mode_id,
int job_idx, const unsigned char *key,
int keylen, unsigned char *xcbc_out);
int spacc_process_jb(struct spacc_device *spacc);
int spacc_remove(struct platform_device *pdev);
int spacc_static_config(struct spacc_device *spacc);
int spacc_autodetect(struct spacc_device *spacc);
void spacc_pop_jobs(unsigned long data);
void spacc_fini(struct spacc_device *spacc);
int spacc_init(void __iomem *baseaddr, struct spacc_device *spacc,
struct pdu_info *info);
int spacc_pop_packets(struct spacc_device *spacc, int *num_popped);
void spacc_stat_process(struct spacc_device *spacc);
void spacc_cmd_process(struct spacc_device *spacc, int x);
#endif
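Taken together, these declarations imply the job lifecycle that the ahash/skcipher/aead glue code follows. A hedged sketch of one pass through it (function name illustrative, destination DDT prepared by the caller, error handling trimmed):

static int spacc_job_example(struct spacc_device *spacc, struct device *dev,
			     struct scatterlist *src, int nbytes,
			     struct pdu_ddt *src_ddt, struct pdu_ddt *dst_ddt,
			     spacc_callback done, void *cbdata)
{
	int handle, rc;

	/* bind a context: no cipher, SHA-256 hashing */
	handle = spacc_open(spacc, CRYPTO_MODE_NULL, CRYPTO_MODE_HASH_SHA256,
			    -1, 0, done, cbdata);
	if (handle < 0)
		return -EIO;

	rc = spacc_set_operation(spacc, handle, OP_ENCRYPT, ICV_HASH,
				 IP_ICV_OFFSET, 0, 0, 0);
	if (rc < 0)
		goto out_close;

	/* map the scatterlist into a DDT the engine can walk */
	rc = spacc_sg_to_ddt(dev, src, nbytes, src_ddt, DMA_TO_DEVICE);
	if (rc < 0)
		goto out_close;

	/* queue the job; "done" fires when it pops off the STATUS FIFO */
	rc = spacc_packet_enqueue_ddt(spacc, handle, src_ddt, dst_ddt,
				      nbytes, 0, nbytes, 0, 0, 0);
	if (rc < 0)
		goto out_close;

	return 0;

out_close:
	spacc_close(spacc, handle);
	return rc;
}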

drivers/crypto/dwc-spacc/spacc_device.c
@@ -1,338 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include "spacc_device.h"
static struct platform_device *spacc_pdev[MAX_DEVICES];
#define VSPACC_PRIORITY_MAX 15
void spacc_cmd_process(struct spacc_device *spacc, int x)
{
struct spacc_priv *priv = container_of(spacc, struct spacc_priv, spacc);
/* run tasklet to pop jobs off fifo */
tasklet_schedule(&priv->pop_jobs);
}
void spacc_stat_process(struct spacc_device *spacc)
{
struct spacc_priv *priv = container_of(spacc, struct spacc_priv, spacc);
/* run tasklet to pop jobs off fifo */
tasklet_schedule(&priv->pop_jobs);
}
int spacc_probe(struct platform_device *pdev,
const struct of_device_id snps_spacc_id[])
{
int spacc_idx = -1;
struct resource *mem;
int spacc_endian = 0;
void __iomem *baseaddr;
struct pdu_info info;
int spacc_priority = -1;
struct spacc_priv *priv;
int x = 0, err, oldmode, irq_num;
const struct of_device_id *match, *id;
u64 oldtimer = 100000, timer = 100000;
if (pdev->dev.of_node) {
id = of_match_node(snps_spacc_id, pdev->dev.of_node);
if (!id) {
dev_err(&pdev->dev, "DT node did not match\n");
return -EINVAL;
}
}
/* Initialize DDT DMA pools based on this device's resources */
if (pdu_mem_init(&pdev->dev)) {
dev_err(&pdev->dev, "Could not initialize DMA pools\n");
return -ENOMEM;
}
match = of_match_device(of_match_ptr(snps_spacc_id), &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "SPAcc dtb missing");
return -ENODEV;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no memory resource for spacc\n");
err = -ENXIO;
goto free_ddt_mem_pool;
}
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
err = -ENOMEM;
goto free_ddt_mem_pool;
}
/* Read spacc priority and index and save inside priv.spacc.config */
if (of_property_read_u32(pdev->dev.of_node, "spacc_priority",
&spacc_priority)) {
dev_err(&pdev->dev, "No vspacc priority specified\n");
err = -EINVAL;
goto free_ddt_mem_pool;
}
if (spacc_priority < 0 && spacc_priority > VSPACC_PRIORITY_MAX) {
dev_err(&pdev->dev, "Invalid vspacc priority\n");
err = -EINVAL;
goto free_ddt_mem_pool;
}
priv->spacc.config.priority = spacc_priority;
if (of_property_read_u32(pdev->dev.of_node, "spacc_index",
&spacc_idx)) {
dev_err(&pdev->dev, "No vspacc index specified\n");
err = -EINVAL;
goto free_ddt_mem_pool;
}
priv->spacc.config.idx = spacc_idx;
if (of_property_read_u32(pdev->dev.of_node, "spacc_endian",
&spacc_endian)) {
dev_dbg(&pdev->dev, "No spacc_endian specified\n");
dev_dbg(&pdev->dev, "Default spacc Endianness (0==little)\n");
spacc_endian = 0;
}
priv->spacc.config.spacc_endian = spacc_endian;
if (of_property_read_u64(pdev->dev.of_node, "oldtimer",
&oldtimer)) {
dev_dbg(&pdev->dev, "No oldtimer specified\n");
dev_dbg(&pdev->dev, "Default oldtimer (100000)\n");
oldtimer = 100000;
}
priv->spacc.config.oldtimer = oldtimer;
if (of_property_read_u64(pdev->dev.of_node, "timer", &timer)) {
dev_dbg(&pdev->dev, "No timer specified\n");
dev_dbg(&pdev->dev, "Default timer (100000)\n");
timer = 100000;
}
priv->spacc.config.timer = timer;
baseaddr = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(baseaddr)) {
dev_err(&pdev->dev, "unable to map iomem\n");
err = PTR_ERR(baseaddr);
goto free_ddt_mem_pool;
}
pdu_get_version(baseaddr, &info);
if (pdev->dev.platform_data) {
struct pdu_info *parent_info = pdev->dev.platform_data;
memcpy(&info.pdu_config, &parent_info->pdu_config,
sizeof(info.pdu_config));
}
dev_dbg(&pdev->dev, "EPN %04X : virt [%d]\n",
info.spacc_version.project,
info.spacc_version.vspacc_idx);
/* Validate the virtual spacc index against the vspacc count read from
 * VERSION_EXT.VSPACC_CNT; a vspacc count of 3 gives valid indices 0, 1, 2.
 */
if (spacc_idx != info.spacc_version.vspacc_idx) {
dev_err(&pdev->dev, "DTS vspacc_idx mismatch read value\n");
err = -EINVAL;
goto free_ddt_mem_pool;
}
if (spacc_idx < 0 || spacc_idx > (info.spacc_config.num_vspacc - 1)) {
dev_err(&pdev->dev, "Invalid vspacc index specified\n");
err = -EINVAL;
goto free_ddt_mem_pool;
}
err = spacc_init(baseaddr, &priv->spacc, &info);
if (err != CRYPTO_OK) {
dev_err(&pdev->dev, "Failed to initialize device %d\n", x);
err = -ENXIO;
goto free_ddt_mem_pool;
}
spin_lock_init(&priv->hw_lock);
spacc_irq_glbl_disable(&priv->spacc);
tasklet_init(&priv->pop_jobs, spacc_pop_jobs, (unsigned long)priv);
priv->spacc.dptr = &pdev->dev;
platform_set_drvdata(pdev, priv);
irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0) {
dev_err(&pdev->dev, "no irq resource for spacc\n");
err = -ENXIO;
goto free_ddt_mem_pool;
}
/* Determine configured maximum message length. */
priv->max_msg_len = priv->spacc.config.max_msg_size;
if (devm_request_irq(&pdev->dev, irq_num, spacc_irq_handler,
IRQF_SHARED, dev_name(&pdev->dev),
&pdev->dev)) {
dev_err(&pdev->dev, "failed to request IRQ\n");
err = -EBUSY;
goto err_tasklet_kill;
}
priv->spacc.irq_cb_stat = spacc_stat_process;
priv->spacc.irq_cb_cmdx = spacc_cmd_process;
oldmode = priv->spacc.op_mode;
priv->spacc.op_mode = SPACC_OP_MODE_IRQ;
spacc_irq_stat_enable(&priv->spacc, 1);
spacc_irq_cmdx_enable(&priv->spacc, 0, 1);
spacc_irq_stat_wd_disable(&priv->spacc);
spacc_irq_glbl_enable(&priv->spacc);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AUTODETECT)
err = spacc_autodetect(&priv->spacc);
if (err < 0) {
spacc_irq_glbl_disable(&priv->spacc);
goto err_tasklet_kill;
}
#else
err = spacc_static_config(&priv->spacc);
if (err < 0) {
spacc_irq_glbl_disable(&priv->spacc);
goto err_tasklet_kill;
}
#endif
priv->spacc.op_mode = oldmode;
if (priv->spacc.op_mode == SPACC_OP_MODE_IRQ) {
priv->spacc.irq_cb_stat = spacc_stat_process;
priv->spacc.irq_cb_cmdx = spacc_cmd_process;
spacc_irq_stat_enable(&priv->spacc, 1);
spacc_irq_cmdx_enable(&priv->spacc, 0, 1);
spacc_irq_glbl_enable(&priv->spacc);
} else {
priv->spacc.irq_cb_stat = spacc_stat_process;
priv->spacc.irq_cb_stat_wd = spacc_stat_process;
spacc_irq_stat_enable(&priv->spacc,
priv->spacc.config.ideal_stat_level);
spacc_irq_cmdx_disable(&priv->spacc, 0);
spacc_irq_stat_wd_enable(&priv->spacc);
spacc_irq_glbl_enable(&priv->spacc);
/* enable the wd by setting the wd_timer = 100000 */
spacc_set_wd_count(&priv->spacc,
priv->spacc.config.wd_timer =
priv->spacc.config.timer);
}
/* unlock normal*/
if (priv->spacc.config.is_secure_port) {
u32 t;
t = readl(baseaddr + SPACC_REG_SECURE_CTRL);
t &= ~(1UL << 31);
writel(t, baseaddr + SPACC_REG_SECURE_CTRL);
}
/* unlock device by default */
writel(0, baseaddr + SPACC_REG_SECURE_CTRL);
return err;
err_tasklet_kill:
tasklet_kill(&priv->pop_jobs);
spacc_fini(&priv->spacc);
free_ddt_mem_pool:
pdu_mem_deinit(&pdev->dev);
return err;
}
static void spacc_unregister_algs(void)
{
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_HASH)
spacc_unregister_hash_algs();
#endif
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AEAD)
spacc_unregister_aead_algs();
#endif
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_CIPHER)
spacc_unregister_cipher_algs();
#endif
}
static const struct of_device_id snps_spacc_id[] = {
{.compatible = "snps-dwc-spacc" },
{ /*sentinel */ }
};
MODULE_DEVICE_TABLE(of, snps_spacc_id);
static int spacc_crypto_probe(struct platform_device *pdev)
{
int rc;
rc = spacc_probe(pdev, snps_spacc_id);
if (rc < 0)
goto err;
spacc_pdev[0] = pdev;
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_HASH)
rc = probe_hashes(pdev);
if (rc < 0)
goto err;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_CIPHER)
rc = probe_ciphers(pdev);
if (rc < 0)
goto err;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_DEV_SPACC_AEAD)
rc = probe_aeads(pdev);
if (rc < 0)
goto err;
#endif
return 0;
err:
spacc_unregister_algs();
return rc;
}
static void spacc_crypto_remove(struct platform_device *pdev)
{
spacc_unregister_algs();
spacc_remove(pdev);
}
static struct platform_driver spacc_driver = {
.probe = spacc_crypto_probe,
.remove = spacc_crypto_remove,
.driver = {
.name = "spacc",
.of_match_table = of_match_ptr(snps_spacc_id),
.owner = THIS_MODULE,
},
};
module_platform_driver(spacc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Synopsys, Inc.");
MODULE_DESCRIPTION("SPAcc Crypto Accelerator Driver");
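For reference, spacc_probe() above matches "snps-dwc-spacc" and reads the spacc_priority, spacc_index, spacc_endian, oldtimer and timer properties, yet no binding document describing them was submitted, which is the reason for this revert. A hypothetical node of the shape the driver expects (addresses and values illustrative only):

crypto@20000000 {
	compatible = "snps-dwc-spacc";
	reg = <0x20000000 0x40000>;
	interrupts = <0 92 4>;
	spacc_priority = <7>;		/* 0..VSPACC_PRIORITY_MAX (15) */
	spacc_index = <0>;		/* virtual spacc index */
	spacc_endian = <0>;		/* 0 == little endian */
	oldtimer = /bits/ 64 <100000>;	/* watchdog counter values */
	timer = /bits/ 64 <100000>;
};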

drivers/crypto/dwc-spacc/spacc_device.h
@@ -1,231 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPACC_DEVICE_H_
#define SPACC_DEVICE_H_
#include <crypto/hash.h>
#include <crypto/ctr.h>
#include <crypto/internal/aead.h>
#include <linux/of.h>
#include "spacc_core.h"
#define MODE_TAB_AEAD(_name, _ciph, _hash, _hashlen, _ivlen, _blocklen) \
.name = _name, .aead = { .ciph = _ciph, .hash = _hash }, \
.hashlen = _hashlen, .ivlen = _ivlen, .blocklen = _blocklen
/* Helper macros for initializing the hash/cipher tables. */
#define MODE_TAB_COMMON(_name, _id_name, _blocklen) \
.name = _name, .id = CRYPTO_MODE_##_id_name, .blocklen = _blocklen
#define MODE_TAB_HASH(_name, _id_name, _hashlen, _blocklen) \
MODE_TAB_COMMON(_name, _id_name, _blocklen), \
.hashlen = _hashlen, .testlen = _hashlen
#define MODE_TAB_CIPH(_name, _id_name, _ivlen, _blocklen) \
MODE_TAB_COMMON(_name, _id_name, _blocklen), \
.ivlen = _ivlen
#define MODE_TAB_HASH_XCBC 0x8000
#define SPACC_MAX_DIGEST_SIZE 64
#define SPACC_MAX_KEY_SIZE 32
#define SPACC_MAX_IV_SIZE 16
#define SPACC_DMA_ALIGN 4
#define SPACC_DMA_BOUNDARY 0x10000
#define MAX_DEVICES 2
/* flag means the IV is computed from setkey and crypt */
#define SPACC_MANGLE_IV_FLAG 0x8000
/* we're doing a CTR mangle (for RFC3686/IPsec) */
#define SPACC_MANGLE_IV_RFC3686 0x0100
/* we're doing GCM */
#define SPACC_MANGLE_IV_RFC4106 0x0200
/* we're doing GMAC */
#define SPACC_MANGLE_IV_RFC4543 0x0300
/* we're doing CCM */
#define SPACC_MANGLE_IV_RFC4309 0x0400
/* we're doing SM4 GCM/CCM */
#define SPACC_MANGLE_IV_RFC8998 0x0500
#define CRYPTO_MODE_AES_CTR_RFC3686 (CRYPTO_MODE_AES_CTR \
| SPACC_MANGLE_IV_FLAG \
| SPACC_MANGLE_IV_RFC3686)
#define CRYPTO_MODE_AES_GCM_RFC4106 (CRYPTO_MODE_AES_GCM \
| SPACC_MANGLE_IV_FLAG \
| SPACC_MANGLE_IV_RFC4106)
#define CRYPTO_MODE_AES_GCM_RFC4543 (CRYPTO_MODE_AES_GCM \
| SPACC_MANGLE_IV_FLAG \
| SPACC_MANGLE_IV_RFC4543)
#define CRYPTO_MODE_AES_CCM_RFC4309 (CRYPTO_MODE_AES_CCM \
| SPACC_MANGLE_IV_FLAG \
| SPACC_MANGLE_IV_RFC4309)
#define CRYPTO_MODE_SM4_GCM_RFC8998 (CRYPTO_MODE_SM4_GCM)
#define CRYPTO_MODE_SM4_CCM_RFC8998 (CRYPTO_MODE_SM4_CCM)
struct spacc_crypto_ctx {
struct device *dev;
spinlock_t lock;
struct list_head jobs;
int handle, mode, auth_size, key_len;
unsigned char *cipher_key;
/*
* Indicates that the H/W context has been setup and can be used for
* crypto; otherwise, the software fallback will be used.
*/
bool ctx_valid;
unsigned int flag_ppp;
/* salt used for rfc3686/givencrypt mode */
unsigned char csalt[16];
u8 ipad[128] __aligned(sizeof(u32));
u8 digest_ctx_buf[128] __aligned(sizeof(u32));
u8 tmp_buffer[128] __aligned(sizeof(u32));
/* Save keylen from setkey */
int keylen;
u8 key[256];
int zero_key;
unsigned char *tmp_sgl_buff;
struct scatterlist *tmp_sgl;
union {
struct crypto_ahash *hash;
struct crypto_aead *aead;
struct crypto_skcipher *cipher;
} fb;
};
struct spacc_crypto_reqctx {
struct pdu_ddt src, dst;
void *digest_buf, *iv_buf;
dma_addr_t digest_dma;
int dst_nents, src_nents, aead_nents, total_nents;
int encrypt_op, mode, single_shot;
unsigned int spacc_cipher_cryptlen, rem_nents;
struct aead_cb_data {
int new_handle;
struct spacc_crypto_ctx *tctx;
struct spacc_crypto_reqctx *ctx;
struct aead_request *req;
struct spacc_device *spacc;
} cb;
struct ahash_cb_data {
int new_handle;
struct spacc_crypto_ctx *tctx;
struct spacc_crypto_reqctx *ctx;
struct ahash_request *req;
struct spacc_device *spacc;
} acb;
struct cipher_cb_data {
int new_handle;
struct spacc_crypto_ctx *tctx;
struct spacc_crypto_reqctx *ctx;
struct skcipher_request *req;
struct spacc_device *spacc;
} ccb;
union {
struct ahash_request hash_req;
struct skcipher_request cipher_req;
struct aead_request aead_req;
} fb;
};
struct mode_tab {
char name[128];
int valid;
/* mode ID used in hash/cipher mode but not aead */
int id;
/* ciph/hash mode used in aead */
struct {
int ciph, hash;
} aead;
unsigned int hashlen, ivlen, blocklen, keylen[3];
unsigned int keylen_mask, testlen;
unsigned int chunksize, walksize, min_keysize, max_keysize;
bool sw_fb;
union {
unsigned char hash_test[SPACC_MAX_DIGEST_SIZE];
unsigned char ciph_test[3][2 * SPACC_MAX_IV_SIZE];
};
};
struct spacc_alg {
struct mode_tab *mode;
unsigned int keylen_mask;
struct device *dev[MAX_DEVICES];
struct list_head list;
struct crypto_alg *calg;
struct crypto_tfm *tfm;
union {
struct ahash_alg hash;
struct aead_alg aead;
struct skcipher_alg skcipher;
} alg;
};
static inline const struct spacc_alg *spacc_tfm_ahash(struct crypto_tfm *tfm)
{
const struct crypto_alg *calg = tfm->__crt_alg;
if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
return container_of(calg, struct spacc_alg, alg.hash.halg.base);
return NULL;
}
static inline const struct spacc_alg *spacc_tfm_skcipher(struct crypto_tfm *tfm)
{
const struct crypto_alg *calg = tfm->__crt_alg;
if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_SKCIPHER)
return container_of(calg, struct spacc_alg, alg.skcipher.base);
return NULL;
}
static inline const struct spacc_alg *spacc_tfm_aead(struct crypto_tfm *tfm)
{
const struct crypto_alg *calg = tfm->__crt_alg;
if ((calg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AEAD)
return container_of(calg, struct spacc_alg, alg.aead.base);
return NULL;
}
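/* Usage sketch: algorithm glue code recovers its registration record
 * from a tfm, e.g. in an skcipher setkey callback:
 *
 *	const struct spacc_alg *salg = spacc_tfm_skcipher(&tfm->base);
 *
 * Each helper returns NULL when the tfm is not of the matching type, so
 * callers should treat the result as optional.
 */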
int probe_hashes(struct platform_device *spacc_pdev);
int spacc_unregister_hash_algs(void);
int probe_aeads(struct platform_device *spacc_pdev);
int spacc_unregister_aead_algs(void);
int probe_ciphers(struct platform_device *spacc_pdev);
int spacc_unregister_cipher_algs(void);
int spacc_probe(struct platform_device *pdev,
const struct of_device_id snps_spacc_id[]);
irqreturn_t spacc_irq_handler(int irq, void *dev);
#endif

View File

@ -1,367 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include "spacc_hal.h"
static struct dma_pool *ddt_pool, *ddt16_pool, *ddt4_pool;
static struct device *ddt_device;
#define PDU_REG_SPACC_VERSION 0x00180UL
#define PDU_REG_SPACC_CONFIG 0x00184UL
#define PDU_REG_SPACC_CONFIG2 0x00190UL
#define PDU_REG_SPACC_IV_OFFSET 0x00040UL
#define PDU_REG_PDU_CONFIG 0x00188UL
#define PDU_REG_SECURE_LOCK 0x001C0UL
int pdu_get_version(void __iomem *dev, struct pdu_info *inf)
{
unsigned long tmp;
if (!inf)
return -1;
memset(inf, 0, sizeof(*inf));
tmp = readl(dev + PDU_REG_SPACC_VERSION);
/* Read the SPAcc version block; this tells us the revision,
 * project, and a few other feature bits.
 *
 * This layout applies to v6.5+.
 */
inf->spacc_version = (struct spacc_version_block) {
.minor = SPACC_ID_MINOR(tmp),
.major = SPACC_ID_MAJOR(tmp),
.version = (SPACC_ID_MAJOR(tmp) << 4) | SPACC_ID_MINOR(tmp),
.qos = SPACC_ID_QOS(tmp),
.is_spacc = SPACC_ID_TYPE(tmp) == SPACC_TYPE_SPACCQOS,
.is_pdu = SPACC_ID_TYPE(tmp) == SPACC_TYPE_PDU,
.aux = SPACC_ID_AUX(tmp),
.vspacc_idx = SPACC_ID_VIDX(tmp),
.partial = SPACC_ID_PARTIAL(tmp),
.project = SPACC_ID_PROJECT(tmp),
};
/* try to autodetect */
writel(0x80000000, dev + PDU_REG_SPACC_IV_OFFSET);
if (readl(dev + PDU_REG_SPACC_IV_OFFSET) == 0x80000000)
inf->spacc_version.ivimport = 1;
else
inf->spacc_version.ivimport = 0;
/* Read the SPAcc config block (v6.5+), which tells us how many
 * contexts there are and the context page sizes.
 * This register is only available in v6.5 and up.
 */
tmp = readl(dev + PDU_REG_SPACC_CONFIG);
inf->spacc_config = (struct spacc_config_block) {
SPACC_CFG_CTX_CNT(tmp),
SPACC_CFG_VSPACC_CNT(tmp),
SPACC_CFG_CIPH_CTX_SZ(tmp),
SPACC_CFG_HASH_CTX_SZ(tmp),
SPACC_CFG_DMA_TYPE(tmp),
0, 0, 0, 0
};
/* CONFIG2 only present in v6.5+ cores */
tmp = readl(dev + PDU_REG_SPACC_CONFIG2);
if (inf->spacc_version.qos) {
inf->spacc_config.cmd0_fifo_depth =
SPACC_CFG_CMD0_FIFO_QOS(tmp);
inf->spacc_config.cmd1_fifo_depth =
SPACC_CFG_CMD1_FIFO(tmp);
inf->spacc_config.cmd2_fifo_depth =
SPACC_CFG_CMD2_FIFO(tmp);
inf->spacc_config.stat_fifo_depth =
SPACC_CFG_STAT_FIFO_QOS(tmp);
} else {
inf->spacc_config.cmd0_fifo_depth =
SPACC_CFG_CMD0_FIFO(tmp);
inf->spacc_config.stat_fifo_depth =
SPACC_CFG_STAT_FIFO(tmp);
}
/* only read PDU config if it's actually a PDU engine */
if (inf->spacc_version.is_pdu) {
tmp = readl(dev + PDU_REG_PDU_CONFIG);
inf->pdu_config = (struct pdu_config_block)
{SPACC_PDU_CFG_MINOR(tmp),
SPACC_PDU_CFG_MAJOR(tmp)};
/* unlock all cores by default */
writel(0, dev + PDU_REG_SECURE_LOCK);
}
return 0;
}
void pdu_to_dev(void __iomem *addr_, uint32_t *src, unsigned long nword)
{
void __iomem *addr = addr_;
while (nword--) {
writel(*src++, addr);
addr += 4;
}
}
void pdu_from_dev(u32 *dst, void __iomem *addr_, unsigned long nword)
{
void __iomem *addr = addr_;
while (nword--) {
*dst++ = readl(addr);
addr += 4;
}
}
static void pdu_to_dev_big(void __iomem *addr_, const unsigned char *src,
unsigned long nword)
{
unsigned long v;
void __iomem *addr = addr_;
while (nword--) {
v = 0;
v = (v << 8) | ((unsigned long)*src++);
v = (v << 8) | ((unsigned long)*src++);
v = (v << 8) | ((unsigned long)*src++);
v = (v << 8) | ((unsigned long)*src++);
writel(v, addr);
addr += 4;
}
}
static void pdu_from_dev_big(unsigned char *dst, void __iomem *addr_,
unsigned long nword)
{
unsigned long v;
void __iomem *addr = addr_;
while (nword--) {
v = readl(addr);
addr += 4;
*dst++ = (v >> 24) & 0xFF; v <<= 8;
*dst++ = (v >> 24) & 0xFF; v <<= 8;
*dst++ = (v >> 24) & 0xFF; v <<= 8;
*dst++ = (v >> 24) & 0xFF; v <<= 8;
}
}
static void pdu_to_dev_little(void __iomem *addr_, const unsigned char *src,
unsigned long nword)
{
unsigned long v;
void __iomem *addr = addr_;
while (nword--) {
v = 0;
v = (v >> 8) | ((unsigned long)*src++ << 24UL);
v = (v >> 8) | ((unsigned long)*src++ << 24UL);
v = (v >> 8) | ((unsigned long)*src++ << 24UL);
v = (v >> 8) | ((unsigned long)*src++ << 24UL);
writel(v, addr);
addr += 4;
}
}
static void pdu_from_dev_little(unsigned char *dst, void __iomem *addr_,
unsigned long nword)
{
unsigned long v;
void __iomem *addr = addr_;
while (nword--) {
v = readl(addr);
addr += 4;
*dst++ = v & 0xFF; v >>= 8;
*dst++ = v & 0xFF; v >>= 8;
*dst++ = v & 0xFF; v >>= 8;
*dst++ = v & 0xFF; v >>= 8;
}
}
void pdu_to_dev_s(void __iomem *addr, const unsigned char *src,
unsigned long nword, int endian)
{
if (endian)
pdu_to_dev_big(addr, src, nword);
else
pdu_to_dev_little(addr, src, nword);
}
void pdu_from_dev_s(unsigned char *dst, void __iomem *addr,
unsigned long nword, int endian)
{
if (endian)
pdu_from_dev_big(dst, addr, nword);
else
pdu_from_dev_little(dst, addr, nword);
}
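/* A minimal usage sketch (hypothetical caller, not part of the original
 * file): writing a 16-byte key into a context page in big-endian word
 * order and reading it back. nword counts 32-bit words, hence
 * sizeof(key) / 4.
 */
static void __maybe_unused example_key_io(void __iomem *ctx_page)
{
unsigned char key[16] = { 0 };

pdu_to_dev_s(ctx_page, key, sizeof(key) / 4, 1); /* big endian */
pdu_from_dev_s(key, ctx_page, sizeof(key) / 4, 1);
}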
void pdu_io_cached_write(void __iomem *addr, unsigned long val,
uint32_t *cache)
{
if (*cache == val) {
#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_IO
pr_debug("PDU: write %.8lx -> %p (cached)\n", val, addr);
#endif
return;
}
*cache = val;
writel(val, addr);
}
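/* Usage sketch (hypothetical shadow variable): callers keep one u32
 * shadow per register so repeated writes of the same value skip the
 * MMIO access, e.g.
 *
 *	static u32 irq_en_shadow;
 *	pdu_io_cached_write(regmap + SPACC_REG_IRQ_EN, val, &irq_en_shadow);
 */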
struct device *get_ddt_device(void)
{
return ddt_device;
}
/* Platform specific DDT routines */
/* Create a DMA pool for DDT entries. This should keep us from splitting
 * pages for DDTs, which by default are 520 bytes long, meaning we would
 * otherwise waste 3576 bytes per DDT allocated. We also maintain a
 * smaller table of 4 entries, common for simple jobs, which uses 480
 * fewer bytes of DMA memory, and for good measure another table for
 * 16 entries, saving 384 bytes.
 */
int pdu_mem_init(void *device)
{
if (ddt_device)
return 0; /* Already setup */
ddt_device = device;
ddt_pool = dma_pool_create("spaccddt", device, (PDU_MAX_DDT + 1) * 8,
8, 0); /* max of 64 DDT entries */
if (!ddt_pool)
return -1;
#if PDU_MAX_DDT > 16
/* max of 16 DDT entries */
ddt16_pool = dma_pool_create("spaccddt16", device, (16 + 1) * 8, 8, 0);
if (!ddt16_pool) {
dma_pool_destroy(ddt_pool);
return -1;
}
#else
ddt16_pool = ddt_pool;
#endif
/* max of 4 DDT entries */
ddt4_pool = dma_pool_create("spaccddt4", device, (4 + 1) * 8, 8, 0);
if (!ddt4_pool) {
dma_pool_destroy(ddt_pool);
#if PDU_MAX_DDT > 16
dma_pool_destroy(ddt16_pool);
#endif
return -1;
}
return 0;
}
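/* Worked sizing example (assuming PDU_MAX_DDT == 64, its default): a
 * full DDT is (64 + 1) * 8 = 520 bytes including the terminator entry,
 * a 16-entry pool element is (16 + 1) * 8 = 136 bytes (saving 384), and
 * a 4-entry element is (4 + 1) * 8 = 40 bytes (saving 480), matching
 * the byte counts quoted above.
 */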
/* destroy the pool */
void pdu_mem_deinit(void *device)
{
/* For now, just skip deinit except for matching device */
if (device != ddt_device)
return;
dma_pool_destroy(ddt_pool);
#if PDU_MAX_DDT > 16
dma_pool_destroy(ddt16_pool);
#endif
dma_pool_destroy(ddt4_pool);
ddt_device = NULL;
}
int pdu_ddt_init(struct pdu_ddt *ddt, unsigned long limit)
{
/* the caller sets the MSB to request an ATOMIC
 * allocation, as required for top-half processing
 */
int flag = (limit & 0x80000000);
limit &= 0x7FFFFFFF;
if (limit + 1 >= SIZE_MAX / 8) {
/* Too big to even compute DDT size */
return -1;
} else if (limit > PDU_MAX_DDT) {
size_t len = 8 * ((size_t)limit + 1);
ddt->virt = dma_alloc_coherent(ddt_device, len, &ddt->phys,
flag ? GFP_ATOMIC : GFP_KERNEL);
} else if (limit > 16) {
ddt->virt = dma_pool_alloc(ddt_pool, flag ? GFP_ATOMIC :
GFP_KERNEL, &ddt->phys);
} else if (limit > 4) {
ddt->virt = dma_pool_alloc(ddt16_pool, flag ? GFP_ATOMIC :
GFP_KERNEL, &ddt->phys);
} else {
ddt->virt = dma_pool_alloc(ddt4_pool, flag ? GFP_ATOMIC :
GFP_KERNEL, &ddt->phys);
}
ddt->idx = 0;
ddt->len = 0;
ddt->limit = limit;
if (!ddt->virt)
return -1;
#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT
pr_debug(" DDT[%.8lx]: allocated %lu fragments\n",
(unsigned long)ddt->phys, limit);
#endif
return 0;
}
int pdu_ddt_add(struct pdu_ddt *ddt, dma_addr_t phys, unsigned long size)
{
#ifdef CONFIG_CRYPTO_DEV_SPACC_DEBUG_TRACE_DDT
pr_debug(" DDT[%.8lx]: 0x%.8lx size %lu\n",
(unsigned long)ddt->phys,
(unsigned long)phys, size);
#endif
if (ddt->idx == ddt->limit)
return -1;
ddt->virt[ddt->idx * 2 + 0] = (uint32_t)phys;
ddt->virt[ddt->idx * 2 + 1] = size;
ddt->virt[ddt->idx * 2 + 2] = 0;
ddt->virt[ddt->idx * 2 + 3] = 0;
ddt->len += size;
++(ddt->idx);
return 0;
}
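/* A minimal sketch (hypothetical DMA addresses, not part of the
 * original file) of building a two-fragment DDT: pdu_ddt_init() sizes
 * the table, and each pdu_ddt_add() appends an (address, length) pair
 * while keeping a zeroed terminator entry after the last fragment.
 */
static int __maybe_unused example_build_ddt(struct pdu_ddt *ddt,
dma_addr_t frag_a, dma_addr_t frag_b)
{
if (pdu_ddt_init(ddt, 4))
return -1;
pdu_ddt_add(ddt, frag_a, 4096);
pdu_ddt_add(ddt, frag_b, 2048); /* ddt->len is now 6144 */
return 0;
}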
int pdu_ddt_free(struct pdu_ddt *ddt)
{
if (ddt->virt) {
if (ddt->limit > PDU_MAX_DDT) {
size_t len = 8 * ((size_t)ddt->limit + 1);
dma_free_coherent(ddt_device, len, ddt->virt,
ddt->phys);
} else if (ddt->limit > 16) {
dma_pool_free(ddt_pool, ddt->virt, ddt->phys);
} else if (ddt->limit > 4) {
dma_pool_free(ddt16_pool, ddt->virt, ddt->phys);
} else {
dma_pool_free(ddt4_pool, ddt->virt, ddt->phys);
}
ddt->virt = NULL;
}
return 0;
}

View File

@ -1,114 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SPACC_HAL_H
#define SPACC_HAL_H
/* Maximum number of DDT entries allowed*/
#ifndef PDU_MAX_DDT
#define PDU_MAX_DDT 64
#endif
/* Platform Generic */
#define PDU_IRQ_EN_GLBL BIT(31)
#define PDU_IRQ_EN_VSPACC(x) (1UL << (x))
#define PDU_IRQ_EN_RNG BIT(16)
#ifndef SPACC_ID_MINOR
#define SPACC_ID_MINOR(x) ((x) & 0x0F)
#define SPACC_ID_MAJOR(x) (((x) >> 4) & 0x0F)
#define SPACC_ID_QOS(x) (((x) >> 8) & 0x01)
#define SPACC_ID_TYPE(x) (((x) >> 9) & 0x03)
#define SPACC_ID_AUX(x) (((x) >> 11) & 0x01)
#define SPACC_ID_VIDX(x) (((x) >> 12) & 0x07)
#define SPACC_ID_PARTIAL(x) (((x) >> 15) & 0x01)
#define SPACC_ID_PROJECT(x) ((x) >> 16)
#define SPACC_TYPE_SPACCQOS 0
#define SPACC_TYPE_PDU 1
#define SPACC_CFG_CTX_CNT(x) ((x) & 0x7F)
#define SPACC_CFG_RC4_CTX_CNT(x) (((x) >> 8) & 0x7F)
#define SPACC_CFG_VSPACC_CNT(x) (((x) >> 16) & 0x0F)
#define SPACC_CFG_CIPH_CTX_SZ(x) (((x) >> 20) & 0x07)
#define SPACC_CFG_HASH_CTX_SZ(x) (((x) >> 24) & 0x0F)
#define SPACC_CFG_DMA_TYPE(x) (((x) >> 28) & 0x03)
#define SPACC_CFG_CMD0_FIFO_QOS(x) (((x) >> 0) & 0x7F)
#define SPACC_CFG_CMD0_FIFO(x) (((x) >> 0) & 0x1FF)
#define SPACC_CFG_CMD1_FIFO(x) (((x) >> 8) & 0x7F)
#define SPACC_CFG_CMD2_FIFO(x) (((x) >> 16) & 0x7F)
#define SPACC_CFG_STAT_FIFO_QOS(x) (((x) >> 24) & 0x7F)
#define SPACC_CFG_STAT_FIFO(x) (((x) >> 16) & 0x1FF)
#define SPACC_PDU_CFG_MINOR(x) ((x) & 0x0F)
#define SPACC_PDU_CFG_MAJOR(x) (((x) >> 4) & 0x0F)
#define PDU_SECURE_LOCK_SPACC(x) (x)
#define PDU_SECURE_LOCK_CFG BIT(30)
#define PDU_SECURE_LOCK_GLBL BIT(31)
#endif /* SPACC_ID_MINOR */
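/* Decoding sketch (hypothetical register value): for a version word
 * tmp == 0x00016065, SPACC_ID_MINOR(tmp) == 5, SPACC_ID_MAJOR(tmp) == 6
 * (so .version == 0x65), SPACC_ID_QOS(tmp) == 0 and
 * SPACC_ID_PROJECT(tmp) == 0x0001.
 */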
#define CRYPTO_OK (0)
struct spacc_version_block {
unsigned int minor,
major,
version,
qos,
is_spacc,
is_pdu,
aux,
vspacc_idx,
partial,
project,
ivimport;
};
struct spacc_config_block {
unsigned int num_ctx,
num_vspacc,
ciph_ctx_page_size,
hash_ctx_page_size,
dma_type,
cmd0_fifo_depth,
cmd1_fifo_depth,
cmd2_fifo_depth,
stat_fifo_depth;
};
struct pdu_config_block {
unsigned int minor,
major;
};
struct pdu_info {
u32 clockrate;
struct spacc_version_block spacc_version;
struct spacc_config_block spacc_config;
struct pdu_config_block pdu_config;
};
struct pdu_ddt {
dma_addr_t phys;
u32 *virt;
u32 *virt_orig;
unsigned long idx, limit, len;
};
void pdu_io_cached_write(void __iomem *addr, unsigned long val,
uint32_t *cache);
void pdu_to_dev(void __iomem *addr, uint32_t *src, unsigned long nword);
void pdu_from_dev(u32 *dst, void __iomem *addr, unsigned long nword);
void pdu_from_dev_s(unsigned char *dst, void __iomem *addr, unsigned long nword,
int endian);
void pdu_to_dev_s(void __iomem *addr, const unsigned char *src,
unsigned long nword, int endian);
struct device *get_ddt_device(void);
int pdu_mem_init(void *device);
void pdu_mem_deinit(void *device);
int pdu_ddt_init(struct pdu_ddt *ddt, unsigned long limit);
int pdu_ddt_add(struct pdu_ddt *ddt, dma_addr_t phys, unsigned long size);
int pdu_ddt_free(struct pdu_ddt *ddt);
int pdu_get_version(void __iomem *dev, struct pdu_info *inf);
#endif

View File

@ -1,316 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include "spacc_core.h"
/* Read the IRQ status register and process as needed */
void spacc_disable_int(struct spacc_device *spacc);
static inline uint32_t _spacc_get_stat_cnt(struct spacc_device *spacc)
{
u32 fifo;
if (spacc->config.is_qos)
fifo = SPACC_FIFO_STAT_STAT_CNT_GET_QOS(readl(spacc->regmap +
SPACC_REG_FIFO_STAT));
else
fifo = SPACC_FIFO_STAT_STAT_CNT_GET(readl(spacc->regmap +
SPACC_REG_FIFO_STAT));
return fifo;
}
static int spacc_pop_packets_ex(struct spacc_device *spacc, int *num_popped,
unsigned long *lock_flag)
{
int jobs;
int ret = -EINPROGRESS;
struct spacc_job *job = NULL;
u32 cmdstat, swid, spacc_errcode = SPACC_OK;
*num_popped = 0;
while ((jobs = _spacc_get_stat_cnt(spacc))) {
while (jobs-- > 0) {
/* write the pop register to get the next job */
writel(1, spacc->regmap + SPACC_REG_STAT_POP);
cmdstat = readl(spacc->regmap + SPACC_REG_STATUS);
swid = SPACC_STATUS_SW_ID_GET(cmdstat);
/* bounds-check the hardware-provided swid before using
 * it as an index
 */
if (swid >= SPACC_MAX_JOBS) {
ret = -EIO;
goto ERR;
}
if (spacc->job_lookup[swid] == SPACC_JOB_IDX_UNUSED) {
ret = -EIO;
goto ERR;
}
/* find the associated job with the popped swid */
job = &spacc->job[spacc->job_lookup[swid]];
/* mark job as done */
job->job_done = 1;
spacc->job_lookup[swid] = SPACC_JOB_IDX_UNUSED;
spacc_errcode = SPACC_GET_STATUS_RET_CODE(cmdstat);
switch (spacc_errcode) {
case SPACC_ICVFAIL:
ret = -EBADMSG;
break;
case SPACC_MEMERR:
ret = -EINVAL;
break;
case SPACC_BLOCKERR:
ret = -EINVAL;
break;
case SPACC_SECERR:
ret = -EIO;
break;
case SPACC_OK:
ret = CRYPTO_OK;
break;
default:
pr_debug("Invalid SPAcc Error");
}
job->job_err = ret;
/*
* We're done touching the SPAcc hw, so release the
* lock across the job callback. It must be reacquired
* before continuing to the next iteration.
*/
if (job->cb) {
spin_unlock_irqrestore(&spacc->lock,
*lock_flag);
job->cb(spacc, job->cbdata);
spin_lock_irqsave(&spacc->lock,
*lock_flag);
}
(*num_popped)++;
}
}
if (!*num_popped)
pr_debug(" Failed to pop a single job\n");
ERR:
spacc_process_jb(spacc);
/* reset the WD timer to the original value*/
if (spacc->op_mode == SPACC_OP_MODE_WD)
spacc_set_wd_count(spacc, spacc->config.wd_timer);
if (*num_popped && spacc->spacc_notify_jobs)
spacc->spacc_notify_jobs(spacc);
return ret;
}
int spacc_pop_packets(struct spacc_device *spacc, int *num_popped)
{
int err;
unsigned long lock_flag;
spin_lock_irqsave(&spacc->lock, lock_flag);
err = spacc_pop_packets_ex(spacc, num_popped, &lock_flag);
spin_unlock_irqrestore(&spacc->lock, lock_flag);
return err;
}
uint32_t spacc_process_irq(struct spacc_device *spacc)
{
u32 temp;
int x, cmd_max;
unsigned long lock_flag;
spin_lock_irqsave(&spacc->lock, lock_flag);
temp = readl(spacc->regmap + SPACC_REG_IRQ_STAT);
/* clear interrupt pin and run registered callback */
if (temp & SPACC_IRQ_STAT_STAT) {
SPACC_IRQ_STAT_CLEAR_STAT(spacc);
if (spacc->op_mode == SPACC_OP_MODE_IRQ) {
spacc->config.fifo_cnt <<= 2;
if (spacc->config.fifo_cnt >=
spacc->config.stat_fifo_depth)
spacc->config.fifo_cnt =
spacc->config.stat_fifo_depth;
/* update the FIFO count to allow more status entries to pile up */
spacc_irq_stat_enable(spacc, spacc->config.fifo_cnt);
/* re-enable CMD0 empty interrupt */
spacc_irq_cmdx_enable(spacc, 0, 0);
}
if (spacc->irq_cb_stat)
spacc->irq_cb_stat(spacc);
}
/* Watchdog IRQ */
if (spacc->op_mode == SPACC_OP_MODE_WD) {
if (temp & SPACC_IRQ_STAT_STAT_WD) {
if (++spacc->wdcnt == SPACC_WD_LIMIT) {
/* this happens when you get too many IRQs that
* go unanswered
*/
spacc_irq_stat_wd_disable(spacc);
/* we set the STAT CNT to 1 so that every job
* generates an IRQ now
*/
spacc_irq_stat_enable(spacc, 1);
spacc->op_mode = SPACC_OP_MODE_IRQ;
} else if (spacc->config.wd_timer < (0xFFFFFFUL >> 4)) {
/* if the timer isn't too high, bump it up a bit
 * to give the IRQ handler a chance to respond
 */
spacc_set_wd_count(spacc,
spacc->config.wd_timer << 4);
}
SPACC_IRQ_STAT_CLEAR_STAT_WD(spacc);
if (spacc->irq_cb_stat_wd)
spacc->irq_cb_stat_wd(spacc);
}
}
if (spacc->op_mode == SPACC_OP_MODE_IRQ) {
cmd_max = (spacc->config.is_qos ? SPACC_CMDX_MAX_QOS :
SPACC_CMDX_MAX);
for (x = 0; x < cmd_max; x++) {
if (temp & SPACC_IRQ_STAT_CMDX(x)) {
spacc->config.fifo_cnt = 1;
/* disable CMD0 interrupt since STAT=1 */
spacc_irq_cmdx_disable(spacc, x);
spacc_irq_stat_enable(spacc,
spacc->config.fifo_cnt);
SPACC_IRQ_STAT_CLEAR_CMDX(spacc, x);
/* run registered callback */
if (spacc->irq_cb_cmdx)
spacc->irq_cb_cmdx(spacc, x);
}
}
}
spin_unlock_irqrestore(&spacc->lock, lock_flag);
return temp;
}
void spacc_set_wd_count(struct spacc_device *spacc, uint32_t val)
{
writel(val, spacc->regmap + SPACC_REG_STAT_WD_CTRL);
}
/* cmdx and cmdx_cnt depend on HW config
* cmdx can be 0, 1 or 2
* cmdx_cnt must be 2^6 or less
*/
void spacc_irq_cmdx_enable(struct spacc_device *spacc, int cmdx, int cmdx_cnt)
{
u32 temp;
/* read the reg, clear the bit range and set the new value */
temp = readl(spacc->regmap + SPACC_REG_IRQ_CTRL) &
(~SPACC_IRQ_CTRL_CMDX_CNT_MASK(cmdx));
temp |= SPACC_IRQ_CTRL_CMDX_CNT_SET(cmdx, cmdx_cnt);
writel(temp, spacc->regmap + SPACC_REG_IRQ_CTRL);
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_CMD(cmdx),
spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_cmdx_disable(struct spacc_device *spacc, int cmdx)
{
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) &
(~SPACC_IRQ_EN_CMD(cmdx)), spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_stat_enable(struct spacc_device *spacc, int stat_cnt)
{
u32 temp;
temp = readl(spacc->regmap + SPACC_REG_IRQ_CTRL);
if (spacc->config.is_qos) {
temp &= (~SPACC_IRQ_CTRL_STAT_CNT_MASK_QOS);
temp |= SPACC_IRQ_CTRL_STAT_CNT_SET_QOS(stat_cnt);
} else {
temp &= (~SPACC_IRQ_CTRL_STAT_CNT_MASK);
temp |= SPACC_IRQ_CTRL_STAT_CNT_SET(stat_cnt);
}
writel(temp, spacc->regmap + SPACC_REG_IRQ_CTRL);
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_STAT,
spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_stat_disable(struct spacc_device *spacc)
{
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & (~SPACC_IRQ_EN_STAT),
spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_stat_wd_enable(struct spacc_device *spacc)
{
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_STAT_WD,
spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_stat_wd_disable(struct spacc_device *spacc)
{
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) &
(~SPACC_IRQ_EN_STAT_WD), spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_glbl_enable(struct spacc_device *spacc)
{
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) | SPACC_IRQ_EN_GLBL,
spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_irq_glbl_disable(struct spacc_device *spacc)
{
writel(readl(spacc->regmap + SPACC_REG_IRQ_EN) & (~SPACC_IRQ_EN_GLBL),
spacc->regmap + SPACC_REG_IRQ_EN);
}
void spacc_disable_int(struct spacc_device *spacc)
{
writel(0, spacc->regmap + SPACC_REG_IRQ_EN);
}
/* a function to run callbacks in the IRQ handler */
irqreturn_t spacc_irq_handler(int irq, void *dev)
{
struct spacc_priv *priv = platform_get_drvdata(to_platform_device(dev));
struct spacc_device *spacc = &priv->spacc;
if (spacc->config.oldtimer != spacc->config.timer) {
priv->spacc.config.wd_timer = spacc->config.timer;
spacc_set_wd_count(&priv->spacc, priv->spacc.config.wd_timer);
spacc->config.oldtimer = spacc->config.timer;
}
/* check irq flags and process as required */
if (!spacc_process_irq(spacc))
return IRQ_NONE;
return IRQ_HANDLED;
}
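/* A minimal wiring sketch (hypothetical; names assumed, not part of the
 * original file): the probe path is expected to register
 * spacc_irq_handler() with the struct device as dev_id, since the
 * handler recovers its driver data through platform_get_drvdata() above.
 */
static int __maybe_unused example_wire_irq(struct platform_device *pdev,
int irq)
{
return devm_request_irq(&pdev->dev, irq, spacc_irq_handler, 0,
"snps-spacc", &pdev->dev);
}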

View File

@ -1,653 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/minmax.h>
#include "spacc_core.h"
#ifndef MIN
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#endif
/* prevent reading past the end of the buffer */
static void read_from_buf(unsigned char *dst, unsigned char *src,
int off, int n, int max)
{
if (!dst)
return;
while (off < max && n) {
*dst++ = src[off++];
--n;
}
}
static void write_to_buf(unsigned char *dst, const unsigned char *src,
int off, int n, int len)
{
if (!src)
return;
while (n && (off < len)) {
dst[off++] = *src++;
--n;
}
}
/* This function is not meant to be called directly,
* it should be called from the job manager
*/
static int spacc_ctx_request(struct spacc_device *spacc,
int ctx_id, int ncontig)
{
int ret;
int x, y, count;
unsigned long lock_flag;
if (!spacc)
return -1;
if (ctx_id >= spacc->config.num_ctx)
return -1;
if (ncontig < 1 || ncontig > spacc->config.num_ctx)
return -1;
ret = CRYPTO_OK;
spin_lock_irqsave(&spacc->ctx_lock, lock_flag);
/* Allocation scheme: look for contiguous contexts. Free contexts
 * have a ref_cnt of 0.
 * If a specific ctx_id is requested,
 * test its ncontig and then bump the ref_cnt
 */
if (ctx_id != -1) {
if ((&spacc->ctx[ctx_id])->ncontig != ncontig - 1)
ret = -1;
} else {
/* check to see if ncontig are free
* loop over all available contexts to find the first
* ncontig empty ones
*/
for (x = 0; x <= (spacc->config.num_ctx - ncontig); ) {
count = ncontig;
while (count) {
if ((&spacc->ctx[x + count - 1])->ref_cnt != 0) {
/* incr x to past failed count
* location
*/
x = x + count;
break;
}
count--;
}
if (count != 0) {
ret = -1;
/* test next x */
} else {
ctx_id = x;
ret = CRYPTO_OK;
break;
}
}
}
if (ret == CRYPTO_OK) {
/* ctx_id is good so mark used */
for (y = 0; y < ncontig; y++)
(&spacc->ctx[ctx_id + y])->ref_cnt++;
(&spacc->ctx[ctx_id])->ncontig = ncontig - 1;
} else {
ctx_id = -1;
}
spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag);
return ctx_id;
}
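/* Worked example (hypothetical state): with num_ctx == 8 and ref_cnt ==
 * { 1, 0, 0, 1, 0, 0, 0, 0 }, a wildcard request for ncontig == 3 fails
 * at x == 0 (ctx 0 busy) and at x == 1 (ctx 3 busy, skipping x to 4),
 * then succeeds: ctx_id 4 is returned with ctx[4].ncontig == 2 and
 * ref_cnt bumped on contexts 4..6.
 */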
static int spacc_ctx_release(struct spacc_device *spacc, int ctx_id)
{
int y;
int ncontig;
unsigned long lock_flag;
if (ctx_id < 0 || ctx_id >= spacc->config.num_ctx)
return -EINVAL;
spin_lock_irqsave(&spacc->ctx_lock, lock_flag);
/* release the base context and contiguous block */
ncontig = (&spacc->ctx[ctx_id])->ncontig;
for (y = 0; y <= ncontig; y++) {
if ((&spacc->ctx[ctx_id + y])->ref_cnt > 0)
(&spacc->ctx[ctx_id + y])->ref_cnt--;
}
if ((&spacc->ctx[ctx_id])->ref_cnt == 0) {
(&spacc->ctx[ctx_id])->ncontig = 0;
#ifdef CONFIG_CRYPTO_DEV_SPACC_SECURE_MODE
/* TODO: This driver works in harmony with "normal" kernel
 * processes, so we release the context all the time.
 * Normally this would be done from a "secure" kernel process
 * (TrustZone/etc.). This hack is so that SPACC.0
 * cores can both use the same context space.
 */
writel(ctx_id, spacc->regmap + SPACC_REG_SECURE_RELEASE);
#endif
}
spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag);
return CRYPTO_OK;
}
/* Job manager: This will reset all job data, pointers, etc */
void spacc_job_init_all(struct spacc_device *spacc)
{
int x;
struct spacc_job *job;
for (x = 0; x < (SPACC_MAX_JOBS); x++) {
job = &spacc->job[x];
memset(job, 0, sizeof(struct spacc_job));
job->job_swid = SPACC_JOB_IDX_UNUSED;
job->job_used = SPACC_JOB_IDX_UNUSED;
spacc->job_lookup[x] = SPACC_JOB_IDX_UNUSED;
}
}
/* get a new job id and use a specific ctx_idx or -1 for a new one */
int spacc_job_request(struct spacc_device *spacc, int ctx_idx)
{
int x, ret;
struct spacc_job *job;
unsigned long lock_flag;
if (!spacc)
return -1;
spin_lock_irqsave(&spacc->lock, lock_flag);
/* find the first available job id */
for (x = 0; x < SPACC_MAX_JOBS; x++) {
job = &spacc->job[x];
if (job->job_used == SPACC_JOB_IDX_UNUSED) {
job->job_used = x;
break;
}
}
if (x == SPACC_MAX_JOBS) {
ret = -1;
} else {
/* associate a single context to go with job */
ret = spacc_ctx_request(spacc, ctx_idx, 1);
if (ret != -1) {
job->ctx_idx = ret;
ret = x;
}
}
spin_unlock_irqrestore(&spacc->lock, lock_flag);
return ret;
}
int spacc_job_release(struct spacc_device *spacc, int job_idx)
{
int ret;
struct spacc_job *job;
unsigned long lock_flag;
if (!spacc)
return -EINVAL;
if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS)
return -ENXIO;
spin_lock_irqsave(&spacc->lock, lock_flag);
job = &spacc->job[job_idx];
/* release context that goes with job */
ret = spacc_ctx_release(spacc, job->ctx_idx);
job->ctx_idx = SPACC_CTX_IDX_UNUSED;
job->job_used = SPACC_JOB_IDX_UNUSED;
/* disable any callback */
job->cb = NULL;
/* NOTE: this leaves ctrl data in memory */
spin_unlock_irqrestore(&spacc->lock, lock_flag);
return ret;
}
int spacc_handle_release(struct spacc_device *spacc, int job_idx)
{
int ret = 0;
struct spacc_job *job;
unsigned long lock_flag;
if (!spacc)
return -EINVAL;
if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS)
return -ENXIO;
spin_lock_irqsave(&spacc->lock, lock_flag);
job = &spacc->job[job_idx];
job->job_used = SPACC_JOB_IDX_UNUSED;
job->cb = NULL; /* disable any callback*/
/* NOTE: this leaves ctrl data in memory */
spin_unlock_irqrestore(&spacc->lock, lock_flag);
return ret;
}
/* Return a context structure for a job idx or null if invalid */
struct spacc_ctx *context_lookup_by_job(struct spacc_device *spacc, int job_idx)
{
if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS)
return NULL;
return &spacc->ctx[(&spacc->job[job_idx])->ctx_idx];
}
int spacc_process_jb(struct spacc_device *spacc)
{
int tail, ret;
/* are there jobs in the buffer? */
while (spacc->jb_head != spacc->jb_tail) {
tail = spacc->jb_tail;
if (spacc->job_buffer[tail].active) {
ret = spacc_packet_enqueue_ddt_ex(spacc, 0,
spacc->job_buffer[tail].job_idx,
spacc->job_buffer[tail].src,
spacc->job_buffer[tail].dst,
spacc->job_buffer[tail].proc_sz,
spacc->job_buffer[tail].aad_offset,
spacc->job_buffer[tail].pre_aad_sz,
spacc->job_buffer[tail].post_aad_sz,
spacc->job_buffer[tail].iv_offset,
spacc->job_buffer[tail].prio);
if (ret != -EBUSY)
spacc->job_buffer[tail].active = 0;
else
return -1;
}
tail++;
if (tail == SPACC_MAX_JOB_BUFFERS)
tail = 0;
spacc->jb_tail = tail;
}
return 0;
}
/* Write appropriate context data which depends on operation and mode */
int spacc_write_context(struct spacc_device *spacc, int job_idx, int op,
const unsigned char *key, int ksz,
const unsigned char *iv, int ivsz)
{
int buflen;
int ret = CRYPTO_OK;
unsigned char buf[300];
struct spacc_ctx *ctx = NULL;
struct spacc_job *job = NULL;
if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS)
return -ENXIO;
job = &spacc->job[job_idx];
ctx = context_lookup_by_job(spacc, job_idx);
if (!job || !ctx)
return -EIO;
switch (op) {
case SPACC_CRYPTO_OPERATION:
/* get page size and then read so we can do a
* read-modify-write cycle
*/
buflen = MIN(sizeof(buf),
(unsigned int)spacc->config.ciph_page_size);
pdu_from_dev_s(buf, ctx->ciph_key, buflen >> 2,
spacc->config.spacc_endian);
switch (job->enc_mode) {
case CRYPTO_MODE_SM4_ECB:
case CRYPTO_MODE_SM4_CBC:
case CRYPTO_MODE_SM4_CFB:
case CRYPTO_MODE_SM4_OFB:
case CRYPTO_MODE_SM4_CTR:
case CRYPTO_MODE_SM4_CCM:
case CRYPTO_MODE_SM4_GCM:
case CRYPTO_MODE_SM4_CS1:
case CRYPTO_MODE_SM4_CS2:
case CRYPTO_MODE_SM4_CS3:
case CRYPTO_MODE_AES_ECB:
case CRYPTO_MODE_AES_CBC:
case CRYPTO_MODE_AES_CS1:
case CRYPTO_MODE_AES_CS2:
case CRYPTO_MODE_AES_CS3:
case CRYPTO_MODE_AES_CFB:
case CRYPTO_MODE_AES_OFB:
case CRYPTO_MODE_AES_CTR:
case CRYPTO_MODE_AES_CCM:
case CRYPTO_MODE_AES_GCM:
write_to_buf(buf, key, 0, ksz, buflen);
if (iv) {
unsigned char one[4] = { 0, 0, 0, 1 };
unsigned long enc1, enc2;
enc1 = CRYPTO_MODE_AES_GCM;
enc2 = CRYPTO_MODE_SM4_GCM;
write_to_buf(buf, iv, 32, ivsz, buflen);
if (ivsz == 12 &&
(job->enc_mode == enc1 ||
job->enc_mode == enc2))
write_to_buf(buf, one, 11 * 4, 4,
buflen);
}
break;
case CRYPTO_MODE_SM4_F8:
case CRYPTO_MODE_AES_F8:
if (key) {
write_to_buf(buf, key + ksz, 0, ksz, buflen);
write_to_buf(buf, key, 48, ksz, buflen);
}
write_to_buf(buf, iv, 32, 16, buflen);
break;
case CRYPTO_MODE_SM4_XTS:
case CRYPTO_MODE_AES_XTS:
if (key) {
write_to_buf(buf, key, 0,
ksz >> 1, buflen);
write_to_buf(buf, key + (ksz >> 1), 48,
ksz >> 1, buflen);
/* divide by two since that's
 * what we program into the hardware
 */
ksz = ksz >> 1;
}
write_to_buf(buf, iv, 32, 16, buflen);
break;
case CRYPTO_MODE_MULTI2_ECB:
case CRYPTO_MODE_MULTI2_CBC:
case CRYPTO_MODE_MULTI2_OFB:
case CRYPTO_MODE_MULTI2_CFB:
write_to_buf(buf, key, 0, ksz, buflen);
write_to_buf(buf, iv, 0x28, ivsz, buflen);
if (ivsz <= 8) {
/* default to 128 rounds */
unsigned char rounds[4] = { 0, 0, 0, 128};
write_to_buf(buf, rounds, 0x30, 4, buflen);
}
break;
case CRYPTO_MODE_3DES_CBC:
case CRYPTO_MODE_3DES_ECB:
case CRYPTO_MODE_DES_CBC:
case CRYPTO_MODE_DES_ECB:
write_to_buf(buf, iv, 0, 8, buflen);
write_to_buf(buf, key, 8, ksz, buflen);
break;
case CRYPTO_MODE_KASUMI_ECB:
case CRYPTO_MODE_KASUMI_F8:
write_to_buf(buf, iv, 16, 8, buflen);
write_to_buf(buf, key, 0, 16, buflen);
break;
case CRYPTO_MODE_SNOW3G_UEA2:
case CRYPTO_MODE_ZUC_UEA3:
write_to_buf(buf, key, 0, 32, buflen);
break;
case CRYPTO_MODE_CHACHA20_STREAM:
case CRYPTO_MODE_CHACHA20_POLY1305:
write_to_buf(buf, key, 0, ksz, buflen);
write_to_buf(buf, iv, 32, ivsz, buflen);
break;
case CRYPTO_MODE_NULL:
break;
}
if (key) {
job->ckey_sz = SPACC_SET_CIPHER_KEY_SZ(ksz);
job->first_use = 1;
}
pdu_to_dev_s(ctx->ciph_key, buf, buflen >> 2,
spacc->config.spacc_endian);
break;
case SPACC_HASH_OPERATION:
/* get page size and then read so we can do a
* read-modify-write cycle
*/
buflen = MIN(sizeof(buf),
(u32)spacc->config.hash_page_size);
pdu_from_dev_s(buf, ctx->hash_key, buflen >> 2,
spacc->config.spacc_endian);
switch (job->hash_mode) {
case CRYPTO_MODE_MAC_XCBC:
case CRYPTO_MODE_MAC_SM4_XCBC:
if (key) {
write_to_buf(buf, key + (ksz - 32), 32, 32,
buflen);
write_to_buf(buf, key, 0, (ksz - 32),
buflen);
job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz - 32);
}
break;
case CRYPTO_MODE_HASH_CRC32:
case CRYPTO_MODE_MAC_SNOW3G_UIA2:
case CRYPTO_MODE_MAC_ZUC_UIA3:
if (key) {
write_to_buf(buf, key, 0, ksz, buflen);
job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz);
}
break;
case CRYPTO_MODE_MAC_POLY1305:
write_to_buf(buf, key, 0, ksz, buflen);
write_to_buf(buf, iv, 32, ivsz, buflen);
break;
case CRYPTO_MODE_HASH_CSHAKE128:
case CRYPTO_MODE_HASH_CSHAKE256:
/* use "iv" and "key" to */
/* pass s-string and n-string */
write_to_buf(buf, iv, 0, ivsz, buflen);
write_to_buf(buf, key,
spacc->config.string_size, ksz, buflen);
break;
case CRYPTO_MODE_MAC_KMAC128:
case CRYPTO_MODE_MAC_KMAC256:
case CRYPTO_MODE_MAC_KMACXOF128:
case CRYPTO_MODE_MAC_KMACXOF256:
/* use "iv" and "key" to pass s-string & key */
write_to_buf(buf, iv, 0, ivsz, buflen);
write_to_buf(buf, key,
spacc->config.string_size, ksz, buflen);
job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz);
break;
default:
if (key) {
job->hkey_sz = SPACC_SET_HASH_KEY_SZ(ksz);
write_to_buf(buf, key, 0, ksz, buflen);
}
}
pdu_to_dev_s(ctx->hash_key, buf, buflen >> 2,
spacc->config.spacc_endian);
break;
default:
ret = -EINVAL;
}
return ret;
}
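/* Context-page layout sketch for the common AES/SM4 cipher case, as
 * programmed above: the key sits at byte offset 0, the IV at byte
 * offset 32, and for GCM with a 96-bit IV the 32-bit counter value
 * 0x00000001 is placed at byte offset 44 (word 11).
 */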
int spacc_read_context(struct spacc_device *spacc, int job_idx,
int op, unsigned char *key, int ksz,
unsigned char *iv, int ivsz)
{
int buflen;
int ret = CRYPTO_OK;
unsigned char buf[300];
struct spacc_ctx *ctx = NULL;
struct spacc_job *job = NULL;
if (job_idx < 0 || job_idx >= SPACC_MAX_JOBS)
return -ENXIO;
job = &spacc->job[job_idx];
ctx = context_lookup_by_job(spacc, job_idx);
if (!ctx)
return -EIO;
switch (op) {
case SPACC_CRYPTO_OPERATION:
buflen = MIN(sizeof(buf),
(u32)spacc->config.ciph_page_size);
pdu_from_dev_s(buf, ctx->ciph_key, buflen >> 2,
spacc->config.spacc_endian);
switch (job->enc_mode) {
case CRYPTO_MODE_SM4_ECB:
case CRYPTO_MODE_SM4_CBC:
case CRYPTO_MODE_SM4_CFB:
case CRYPTO_MODE_SM4_OFB:
case CRYPTO_MODE_SM4_CTR:
case CRYPTO_MODE_SM4_CCM:
case CRYPTO_MODE_SM4_GCM:
case CRYPTO_MODE_SM4_CS1:
case CRYPTO_MODE_SM4_CS2:
case CRYPTO_MODE_SM4_CS3:
case CRYPTO_MODE_AES_ECB:
case CRYPTO_MODE_AES_CBC:
case CRYPTO_MODE_AES_CS1:
case CRYPTO_MODE_AES_CS2:
case CRYPTO_MODE_AES_CS3:
case CRYPTO_MODE_AES_CFB:
case CRYPTO_MODE_AES_OFB:
case CRYPTO_MODE_AES_CTR:
case CRYPTO_MODE_AES_CCM:
case CRYPTO_MODE_AES_GCM:
read_from_buf(key, buf, 0, ksz, buflen);
read_from_buf(iv, buf, 32, 16, buflen);
break;
case CRYPTO_MODE_CHACHA20_STREAM:
read_from_buf(key, buf, 0, ksz, buflen);
read_from_buf(iv, buf, 32, 16, buflen);
break;
case CRYPTO_MODE_SM4_F8:
case CRYPTO_MODE_AES_F8:
if (key) {
read_from_buf(key + ksz, buf, 0, ksz, buflen);
read_from_buf(key, buf, 48, ksz, buflen);
}
read_from_buf(iv, buf, 32, 16, buflen);
break;
case CRYPTO_MODE_SM4_XTS:
case CRYPTO_MODE_AES_XTS:
if (key) {
read_from_buf(key, buf, 0, ksz >> 1, buflen);
read_from_buf(key + (ksz >> 1), buf,
48, ksz >> 1, buflen);
}
read_from_buf(iv, buf, 32, 16, buflen);
break;
case CRYPTO_MODE_MULTI2_ECB:
case CRYPTO_MODE_MULTI2_CBC:
case CRYPTO_MODE_MULTI2_OFB:
case CRYPTO_MODE_MULTI2_CFB:
read_from_buf(key, buf, 0, ksz, buflen);
/* Number of rounds at the end of the IV */
read_from_buf(iv, buf, 0x28, ivsz, buflen);
break;
case CRYPTO_MODE_3DES_CBC:
case CRYPTO_MODE_3DES_ECB:
read_from_buf(iv, buf, 0, 8, buflen);
read_from_buf(key, buf, 8, 24, buflen);
break;
case CRYPTO_MODE_DES_CBC:
case CRYPTO_MODE_DES_ECB:
read_from_buf(iv, buf, 0, 8, buflen);
read_from_buf(key, buf, 8, 8, buflen);
break;
case CRYPTO_MODE_KASUMI_ECB:
case CRYPTO_MODE_KASUMI_F8:
read_from_buf(iv, buf, 16, 8, buflen);
read_from_buf(key, buf, 0, 16, buflen);
break;
case CRYPTO_MODE_SNOW3G_UEA2:
case CRYPTO_MODE_ZUC_UEA3:
read_from_buf(key, buf, 0, 32, buflen);
break;
case CRYPTO_MODE_NULL:
break;
}
break;
case SPACC_HASH_OPERATION:
buflen = MIN(sizeof(buf),
(u32)spacc->config.hash_page_size);
pdu_from_dev_s(buf, ctx->hash_key, buflen >> 2,
spacc->config.spacc_endian);
switch (job->hash_mode) {
case CRYPTO_MODE_MAC_XCBC:
case CRYPTO_MODE_MAC_SM4_XCBC:
if (key && ksz <= 64) {
read_from_buf(key + (ksz - 32), buf,
32, 32, buflen);
read_from_buf(key, buf, 0, ksz - 32, buflen);
}
break;
case CRYPTO_MODE_HASH_CRC32:
read_from_buf(iv, buf, 0, ivsz, buflen);
break;
case CRYPTO_MODE_MAC_SNOW3G_UIA2:
case CRYPTO_MODE_MAC_ZUC_UIA3:
read_from_buf(key, buf, 0, 32, buflen);
break;
default:
read_from_buf(key, buf, 0, ksz, buflen);
}
break;
default:
ret = -EINVAL;
}
return ret;
}
/* Context manager: This will reset all reference counts, pointers, etc */
void spacc_ctx_init_all(struct spacc_device *spacc)
{
int x;
struct spacc_ctx *ctx;
unsigned long lock_flag;
spin_lock_irqsave(&spacc->ctx_lock, lock_flag);
/* initialize contexts */
for (x = 0; x < spacc->config.num_ctx; x++) {
ctx = &spacc->ctx[x];
/* sets everything including ref_cnt and ncontig to 0 */
memset(ctx, 0, sizeof(*ctx));
ctx->ciph_key = spacc->regmap + SPACC_CTX_CIPH_KEY +
(x * spacc->config.ciph_page_size);
ctx->hash_key = spacc->regmap + SPACC_CTX_HASH_KEY +
(x * spacc->config.hash_page_size);
}
spin_unlock_irqrestore(&spacc->ctx_lock, lock_flag);
}

View File

@ -1,717 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <crypto/ctr.h>
#include <crypto/des.h>
#include <linux/dma-mapping.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <linux/platform_device.h>
#include "spacc_device.h"
#include "spacc_core.h"
static LIST_HEAD(spacc_cipher_alg_list);
static DEFINE_MUTEX(spacc_cipher_alg_mutex);
static struct mode_tab possible_ciphers[] = {
/* {keylen, MODE_TAB_CIPH(name, id, iv_len, blk_len)} */
/* SM4 */
{ MODE_TAB_CIPH("cbc(sm4)", SM4_CBC, 16, 16), .keylen[0] = 16,
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 },
{ MODE_TAB_CIPH("ecb(sm4)", SM4_ECB, 0, 16), .keylen[0] = 16,
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 },
{ MODE_TAB_CIPH("ctr(sm4)", SM4_CTR, 16, 1), .keylen[0] = 16,
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 },
{ MODE_TAB_CIPH("xts(sm4)", SM4_XTS, 16, 16), .keylen[0] = 32,
.chunksize = 16, .walksize = 16, .min_keysize = 32, .max_keysize = 32 },
{ MODE_TAB_CIPH("cts(cbc(sm4))", SM4_CS3, 16, 16), .keylen[0] = 16,
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 16 },
/* AES */
{ MODE_TAB_CIPH("cbc(aes)", AES_CBC, 16, 16), .keylen = { 16, 24, 32 },
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 },
{ MODE_TAB_CIPH("ecb(aes)", AES_ECB, 0, 16), .keylen = { 16, 24, 32 },
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 },
{ MODE_TAB_CIPH("xts(aes)", AES_XTS, 16, 16), .keylen = { 32, 48, 64 },
.chunksize = 16, .walksize = 16, .min_keysize = 32, .max_keysize = 64 },
{ MODE_TAB_CIPH("cts(cbc(aes))", AES_CS3, 16, 16),
.keylen = { 16, 24, 32 }, .chunksize = 16, .walksize = 16,
.min_keysize = 16, .max_keysize = 32 },
{ MODE_TAB_CIPH("ctr(aes)", AES_CTR, 16, 1), .keylen = { 16, 24, 32 },
.chunksize = 16, .walksize = 16, .min_keysize = 16, .max_keysize = 32 },
/* CHACHA20 */
{ MODE_TAB_CIPH("chacha20", CHACHA20_STREAM, 16, 1), .keylen[0] = 32,
.chunksize = 64, .walksize = 64, .min_keysize = 32, .max_keysize = 32 },
/* DES */
{ MODE_TAB_CIPH("ecb(des)", DES_ECB, 0, 8), .keylen[0] = 8,
.chunksize = 8, .walksize = 8, .min_keysize = 8, .max_keysize = 8},
{ MODE_TAB_CIPH("cbc(des)", DES_CBC, 8, 8), .keylen[0] = 8,
.chunksize = 8, .walksize = 8, .min_keysize = 8, .max_keysize = 8},
{ MODE_TAB_CIPH("ecb(des3_ede)", 3DES_ECB, 0, 8), .keylen[0] = 24,
.chunksize = 8, .walksize = 8, .min_keysize = 24, .max_keysize = 24 },
{ MODE_TAB_CIPH("cbc(des3_ede)", 3DES_CBC, 8, 8), .keylen[0] = 24,
.chunksize = 8, .walksize = 8, .min_keysize = 24, .max_keysize = 24 },
};
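/* For example, the "cbc(aes)" entry above ends up registered with
 * .cra_name "cbc(aes)" and .cra_driver_name "spacc-cbc(aes)" (assembled
 * in spacc_init_calg() below), a 16-byte IV and 16/24/32-byte keys.
 */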
static int spacc_skcipher_fallback(const char *name,
struct skcipher_request *req, int enc_dec)
{
int ret = 0;
struct crypto_skcipher *reqtfm = crypto_skcipher_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req);
tctx->fb.cipher = crypto_alloc_skcipher(name,
CRYPTO_ALG_TYPE_SKCIPHER,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tctx->fb.cipher))
return PTR_ERR(tctx->fb.cipher);
crypto_skcipher_set_reqsize(reqtfm,
sizeof(struct spacc_crypto_reqctx) +
crypto_skcipher_reqsize(tctx->fb.cipher));
ret = crypto_skcipher_setkey(tctx->fb.cipher, tctx->cipher_key,
tctx->key_len);
if (ret)
return ret;
skcipher_request_set_tfm(&ctx->fb.cipher_req, tctx->fb.cipher);
skcipher_request_set_crypt(&ctx->fb.cipher_req, req->src, req->dst,
req->cryptlen, req->iv);
if (enc_dec)
ret = crypto_skcipher_decrypt(&ctx->fb.cipher_req);
else
ret = crypto_skcipher_encrypt(&ctx->fb.cipher_req);
crypto_free_skcipher(tctx->fb.cipher);
tctx->fb.cipher = NULL;
kfree(tctx->cipher_key);
tctx->cipher_key = NULL;
return ret;
}
static void spacc_cipher_cleanup_dma(struct device *dev,
struct skcipher_request *req)
{
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req);
struct spacc_crypto_ctx *tctx = ctx->ccb.tctx;
if (req->dst != req->src) {
if (ctx->src_nents) {
dma_unmap_sg(dev, req->src, ctx->src_nents,
DMA_TO_DEVICE);
pdu_ddt_free(&ctx->src);
}
if (ctx->dst_nents) {
dma_unmap_sg(dev, req->dst, ctx->dst_nents,
DMA_FROM_DEVICE);
pdu_ddt_free(&ctx->dst);
}
} else {
if (ctx->src_nents) {
dma_unmap_sg(dev, req->src, ctx->src_nents,
DMA_TO_DEVICE);
pdu_ddt_free(&ctx->src);
}
}
kfree(tctx->cipher_key);
tctx->cipher_key = NULL;
}
static void spacc_cipher_cb(void *spacc, void *tfm)
{
int err = -1;
struct cipher_cb_data *cb = tfm;
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(cb->req);
u32 status_reg = readl(cb->spacc->regmap + SPACC_REG_STATUS);
u32 status_ret = (status_reg >> 24) & 0x03;
if (ctx->mode == CRYPTO_MODE_DES_CBC ||
ctx->mode == CRYPTO_MODE_3DES_CBC) {
spacc_read_context(cb->spacc, cb->tctx->handle,
SPACC_CRYPTO_OPERATION, NULL, 0,
cb->req->iv, 8);
} else if (ctx->mode != CRYPTO_MODE_DES_ECB &&
ctx->mode != CRYPTO_MODE_3DES_ECB &&
ctx->mode != CRYPTO_MODE_SM4_ECB &&
ctx->mode != CRYPTO_MODE_AES_ECB &&
ctx->mode != CRYPTO_MODE_SM4_XTS &&
ctx->mode != CRYPTO_MODE_KASUMI_ECB) {
if (status_ret == 0x3) {
err = -EINVAL;
goto CALLBACK_ERR;
}
spacc_read_context(cb->spacc, cb->tctx->handle,
SPACC_CRYPTO_OPERATION, NULL, 0,
cb->req->iv, 16);
}
if (ctx->mode != CRYPTO_MODE_DES_ECB &&
ctx->mode != CRYPTO_MODE_DES_CBC &&
ctx->mode != CRYPTO_MODE_3DES_ECB &&
ctx->mode != CRYPTO_MODE_3DES_CBC) {
if (status_ret == 0x03) {
err = -EINVAL;
goto CALLBACK_ERR;
}
}
if (ctx->mode == CRYPTO_MODE_SM4_ECB && status_ret == 0x03) {
err = -EINVAL;
goto CALLBACK_ERR;
}
if (cb->req->dst != cb->req->src)
dma_sync_sg_for_cpu(cb->tctx->dev, cb->req->dst, ctx->dst_nents,
DMA_FROM_DEVICE);
err = cb->spacc->job[cb->new_handle].job_err;
CALLBACK_ERR:
spacc_cipher_cleanup_dma(cb->tctx->dev, cb->req);
spacc_close(cb->spacc, cb->new_handle);
skcipher_request_complete(cb->req, err);
}
static int spacc_cipher_init_dma(struct device *dev,
struct skcipher_request *req)
{
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req);
int rc;
if (req->src == req->dst) {
rc = spacc_sg_to_ddt(dev, req->src, req->cryptlen, &ctx->src,
DMA_TO_DEVICE);
if (rc < 0) {
pdu_ddt_free(&ctx->src);
return rc;
}
ctx->src_nents = rc;
} else {
rc = spacc_sg_to_ddt(dev, req->src, req->cryptlen, &ctx->src,
DMA_TO_DEVICE);
if (rc < 0) {
pdu_ddt_free(&ctx->src);
return rc;
}
ctx->src_nents = rc;
rc = spacc_sg_to_ddt(dev, req->dst, req->cryptlen, &ctx->dst,
DMA_FROM_DEVICE);
if (rc < 0) {
pdu_ddt_free(&ctx->dst);
return rc;
}
ctx->dst_nents = rc;
}
return 0;
}
static int spacc_cipher_cra_init(struct crypto_tfm *tfm)
{
struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm);
const struct spacc_alg *salg = spacc_tfm_skcipher(tfm);
tctx->keylen = 0;
tctx->cipher_key = NULL;
tctx->handle = -1;
tctx->ctx_valid = false;
tctx->dev = get_device(salg->dev[0]);
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct spacc_crypto_reqctx));
return 0;
}
static void spacc_cipher_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_crypto_ctx *tctx = crypto_tfm_ctx(tfm);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
if (tctx->handle >= 0)
spacc_close(&priv->spacc, tctx->handle);
put_device(tctx->dev);
}
static int spacc_check_keylen(const struct spacc_alg *salg, unsigned int keylen)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(salg->mode->keylen); i++)
if (salg->mode->keylen[i] == keylen)
return 0;
return -EINVAL;
}
static int spacc_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
int ret = 0, rc = 0, err;
const struct spacc_alg *salg = spacc_tfm_skcipher(&tfm->base);
struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(tfm);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
err = spacc_check_keylen(salg, keylen);
if (err)
return err;
tctx->mode = salg->mode->id;
tctx->key_len = keylen;
tctx->cipher_key = kmalloc(keylen, GFP_KERNEL);
if (!tctx->cipher_key)
return -ENOMEM;
memcpy(tctx->cipher_key, key, keylen);
if (tctx->handle >= 0) {
spacc_close(&priv->spacc, tctx->handle);
put_device(tctx->dev);
tctx->handle = -1;
tctx->dev = NULL;
}
priv = NULL;
priv = dev_get_drvdata(salg->dev[0]);
tctx->dev = get_device(salg->dev[0]);
ret = spacc_isenabled(&priv->spacc, salg->mode->id,
keylen);
if (ret)
tctx->handle = spacc_open(&priv->spacc, salg->mode->id,
CRYPTO_MODE_NULL, -1, 0,
spacc_cipher_cb, tfm);
if (tctx->handle < 0) {
put_device(salg->dev[0]);
dev_dbg(salg->dev[0], "failed to open SPAcc context\n");
return -EINVAL;
}
/* Weak key Implementation for DES_ECB */
if (salg->mode->id == CRYPTO_MODE_DES_ECB) {
err = verify_skcipher_des_key(tfm, key);
if (err)
return -EINVAL;
}
if (salg->mode->id == CRYPTO_MODE_SM4_F8 ||
salg->mode->id == CRYPTO_MODE_AES_F8) {
/* f8 mode requires an IV of 128-bits and a key-salt mask,
* equivalent in size to the key.
* AES-F8 or SM4-F8 mode has a SALTKEY prepended to the base
* key.
*/
rc = spacc_write_context(&priv->spacc, tctx->handle,
SPACC_CRYPTO_OPERATION, key, 16,
NULL, 0);
} else {
rc = spacc_write_context(&priv->spacc, tctx->handle,
SPACC_CRYPTO_OPERATION, key, keylen,
NULL, 0);
}
if (rc < 0) {
dev_dbg(salg->dev[0], "failed with SPAcc write context\n");
return -EINVAL;
}
return 0;
}
static int spacc_cipher_process(struct skcipher_request *req, int enc_dec)
{
u8 ivc1[16];
const char *name;
unsigned int len = 0;
u32 num_iv = 0, diff;
u64 num_iv64 = 0, diff64;
unsigned char chacha20_iv[16];
int rc = 0, ret = 0, i = 0, j = 0;
struct crypto_skcipher *reqtfm = crypto_skcipher_reqtfm(req);
struct spacc_crypto_ctx *tctx = crypto_skcipher_ctx(reqtfm);
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req);
struct spacc_priv *priv = dev_get_drvdata(tctx->dev);
const struct spacc_alg *salg = spacc_tfm_skcipher(&reqtfm->base);
struct spacc_device *device_h = &priv->spacc;
len = ctx->spacc_cipher_cryptlen / 16;
/* record the mode in the request context for the completion callback */
ctx->mode = salg->mode->id;
if (req->cryptlen == 0) {
if (salg->mode->id == CRYPTO_MODE_SM4_CS3 ||
salg->mode->id == CRYPTO_MODE_SM4_XTS ||
salg->mode->id == CRYPTO_MODE_AES_XTS ||
salg->mode->id == CRYPTO_MODE_AES_CS3)
return -EINVAL;
else
return 0;
}
/* Given IV - <1st 4-bytes as counter value>
* <last 12-bytes as nonce>
* Reversing the order of nonce & counter as,
* <1st 12-bytes as nonce>
* <last 4-bytes as counter>
* and then write to HW context,
* ex:
* Given IV - 2a000000000000000000000000000002
* Reverse order - 0000000000000000000000020000002a
*/
if (salg->mode->id == CRYPTO_MODE_CHACHA20_STREAM) {
for (i = 4; i < 16; i++) {
chacha20_iv[j] = req->iv[i];
j++;
}
j = j + 3;
for (i = 0; i <= 3; i++) {
chacha20_iv[j] = req->iv[i];
j--;
}
memcpy(req->iv, chacha20_iv, 16);
}
if (salg->mode->id == CRYPTO_MODE_SM4_CFB) {
if (req->cryptlen % 16 != 0) {
name = salg->calg->cra_name;
ret = spacc_skcipher_fallback(name, req, enc_dec);
return ret;
}
}
if (salg->mode->id == CRYPTO_MODE_SM4_XTS ||
salg->mode->id == CRYPTO_MODE_SM4_CS3 ||
salg->mode->id == CRYPTO_MODE_AES_XTS ||
salg->mode->id == CRYPTO_MODE_AES_CS3) {
if (req->cryptlen == 16) {
name = salg->calg->cra_name;
ret = spacc_skcipher_fallback(name, req, enc_dec);
return ret;
}
}
if (salg->mode->id == CRYPTO_MODE_AES_CTR ||
salg->mode->id == CRYPTO_MODE_SM4_CTR) {
/* copy the IV to local buffer */
for (i = 0; i < 16; i++)
ivc1[i] = req->iv[i];
/* 64-bit counter width */
if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3) & (0x3)) {
for (i = 8; i < 16; i++) {
num_iv64 <<= 8;
num_iv64 |= ivc1[i];
}
diff64 = SPACC_CTR_IV_MAX64 - num_iv64;
if (len > diff64) {
name = salg->calg->cra_name;
ret = spacc_skcipher_fallback(name,
req, enc_dec);
return ret;
}
/* 32-bit counter width */
} else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3)
& (0x2)) {
for (i = 12; i < 16; i++) {
num_iv <<= 8;
num_iv |= ivc1[i];
}
diff = SPACC_CTR_IV_MAX32 - num_iv;
if (len > diff) {
name = salg->calg->cra_name;
ret = spacc_skcipher_fallback(name,
req, enc_dec);
return ret;
}
/* 16-bit counter width */
} else if (readl(device_h->regmap + SPACC_REG_VERSION_EXT_3)
& (0x1)) {
for (i = 14; i < 16; i++) {
num_iv <<= 8;
num_iv |= ivc1[i];
}
diff = SPACC_CTR_IV_MAX16 - num_iv;
if (len > diff) {
name = salg->calg->cra_name;
ret = spacc_skcipher_fallback(name,
req, enc_dec);
return ret;
}
/* 8-bit counter width */
} else if ((readl(device_h->regmap + SPACC_REG_VERSION_EXT_3)
& 0x7) == 0) {
for (i = 15; i < 16; i++) {
num_iv <<= 8;
num_iv |= ivc1[i];
}
diff = SPACC_CTR_IV_MAX8 - num_iv;
if (len > diff) {
name = salg->calg->cra_name;
ret = spacc_skcipher_fallback(name,
req, enc_dec);
return ret;
}
}
}
if (salg->mode->id == CRYPTO_MODE_DES_CBC ||
salg->mode->id == CRYPTO_MODE_3DES_CBC)
rc = spacc_write_context(&priv->spacc, tctx->handle,
SPACC_CRYPTO_OPERATION, NULL, 0,
req->iv, 8);
else if (salg->mode->id != CRYPTO_MODE_DES_ECB &&
salg->mode->id != CRYPTO_MODE_3DES_ECB &&
salg->mode->id != CRYPTO_MODE_SM4_ECB &&
salg->mode->id != CRYPTO_MODE_AES_ECB &&
salg->mode->id != CRYPTO_MODE_KASUMI_ECB)
rc = spacc_write_context(&priv->spacc, tctx->handle,
SPACC_CRYPTO_OPERATION, NULL, 0,
req->iv, 16);
if (rc < 0)
pr_err("ERR: spacc_write_context\n");
/* Initialize the DMA mappings; bail out before touching the
 * hardware if that fails
 */
rc = spacc_cipher_init_dma(tctx->dev, req);
if (rc < 0)
return rc;
ctx->ccb.new_handle = spacc_clone_handle(&priv->spacc, tctx->handle,
&ctx->ccb);
if (ctx->ccb.new_handle < 0) {
spacc_cipher_cleanup_dma(tctx->dev, req);
dev_dbg(salg->dev[0], "failed to clone handle\n");
return -EINVAL;
}
/* copying the data to clone handle */
ctx->ccb.tctx = tctx;
ctx->ccb.ctx = ctx;
ctx->ccb.req = req;
ctx->ccb.spacc = &priv->spacc;
if (salg->mode->id == CRYPTO_MODE_SM4_CS3) {
int handle = ctx->ccb.new_handle;
if (handle < 0 || handle >= SPACC_MAX_JOBS)
return -ENXIO;
device_h->job[handle].auxinfo_cs_mode = 3;
}
if (enc_dec) { /* for decrypt */
rc = spacc_set_operation(&priv->spacc, ctx->ccb.new_handle, 1,
ICV_IGNORE, IP_ICV_IGNORE, 0, 0, 0);
spacc_set_key_exp(&priv->spacc, ctx->ccb.new_handle);
} else { /* for encrypt */
rc = spacc_set_operation(&priv->spacc, ctx->ccb.new_handle, 0,
ICV_IGNORE, IP_ICV_IGNORE, 0, 0, 0);
}
rc = spacc_packet_enqueue_ddt(&priv->spacc, ctx->ccb.new_handle,
&ctx->src,
(req->dst == req->src) ? &ctx->src :
&ctx->dst,
req->cryptlen,
0, 0, 0, 0, 0);
if (rc < 0) {
spacc_cipher_cleanup_dma(tctx->dev, req);
spacc_close(&priv->spacc, ctx->ccb.new_handle);
if (rc != -EBUSY) {
dev_err(tctx->dev,
"failed to enqueue job, ERR: %d\n", rc);
return rc;
} else if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
return -EBUSY;
}
}
priv->spacc.job[tctx->handle].first_use = 0;
priv->spacc.job[tctx->handle].ctrl &=
~(1UL << priv->spacc.config.ctrl_map[SPACC_CTRL_KEY_EXP]);
return -EINPROGRESS;
}
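/* Worked example of the CTR rollover check above (assuming
 * SPACC_CTR_IV_MAX32 == 0xFFFFFFFF for the 32-bit counter width): with
 * a counter field of 0xFFFFFFF0 in the IV, diff == 15, so any request
 * longer than 15 blocks (cryptlen > 240 bytes) is handed to the
 * software fallback rather than letting the hardware counter wrap.
 */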
static int spacc_cipher_encrypt(struct skcipher_request *req)
{
int rv = 0;
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req);
ctx->spacc_cipher_cryptlen = req->cryptlen;
/* enc_dec - 0(encrypt), 1(decrypt) */
rv = spacc_cipher_process(req, 0);
return rv;
}
static int spacc_cipher_decrypt(struct skcipher_request *req)
{
int rv = 0;
struct spacc_crypto_reqctx *ctx = skcipher_request_ctx(req);
ctx->spacc_cipher_cryptlen = req->cryptlen;
/* enc_dec - 0(encrypt), 1(decrypt) */
rv = spacc_cipher_process(req, 1);
return rv;
}
static struct skcipher_alg spacc_skcipher_alg = {
.setkey = spacc_cipher_setkey,
.encrypt = spacc_cipher_encrypt,
.decrypt = spacc_cipher_decrypt,
/*
* Chunksize: Equal to the block size except for stream cipher
* such as CTR where it is set to the underlying block size.
*
* Walksize: Equal to the chunk size except in cases where the
* algorithm is considerably more efficient if it can operate on
* multiple chunks in parallel. Should be a multiple of chunksize.
*/
.min_keysize = 16,
.max_keysize = 64,
.ivsize = 16,
.chunksize = 16,
.walksize = 16,
.base = {
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 16,
.cra_ctxsize = sizeof(struct spacc_crypto_ctx),
.cra_priority = 300,
.cra_init = spacc_cipher_cra_init,
.cra_exit = spacc_cipher_cra_exit,
.cra_module = THIS_MODULE,
},
};
static void spacc_init_calg(struct crypto_alg *calg,
const struct mode_tab *mode)
{
strscpy(calg->cra_name, mode->name, sizeof(calg->cra_name));
snprintf(calg->cra_driver_name, sizeof(calg->cra_driver_name),
"spacc-%s", mode->name);
calg->cra_blocksize = mode->blocklen;
}
static int spacc_register_cipher(struct spacc_alg *salg,
unsigned int algo_idx)
{
int rc;
salg->calg = &salg->alg.skcipher.base;
salg->alg.skcipher = spacc_skcipher_alg;
/* this function will assign mode->name to calg->cra_name &
* calg->cra_driver_name
*/
spacc_init_calg(salg->calg, salg->mode);
salg->alg.skcipher.ivsize = salg->mode->ivlen;
salg->alg.skcipher.base.cra_blocksize = salg->mode->blocklen;
salg->alg.skcipher.chunksize = possible_ciphers[algo_idx].chunksize;
salg->alg.skcipher.walksize = possible_ciphers[algo_idx].walksize;
salg->alg.skcipher.min_keysize = possible_ciphers[algo_idx].min_keysize;
salg->alg.skcipher.max_keysize = possible_ciphers[algo_idx].max_keysize;
rc = crypto_register_skcipher(&salg->alg.skcipher);
if (rc < 0)
return rc;
mutex_lock(&spacc_cipher_alg_mutex);
list_add(&salg->list, &spacc_cipher_alg_list);
mutex_unlock(&spacc_cipher_alg_mutex);
return 0;
}
int probe_ciphers(struct platform_device *spacc_pdev)
{
int rc;
unsigned int i, y;
int registered = 0;
struct spacc_alg *salg;
struct spacc_priv *priv = dev_get_drvdata(&spacc_pdev->dev);
for (i = 0; i < ARRAY_SIZE(possible_ciphers); i++)
possible_ciphers[i].valid = 0;
for (i = 0; i < ARRAY_SIZE(possible_ciphers); i++) {
for (y = 0; y < 3; y++) {
if (!spacc_isenabled(&priv->spacc,
possible_ciphers[i].id & 0xFF,
possible_ciphers[i].keylen[y]))
continue;
/* allocate and register a spacc_alg only the first
 * time this mode proves enabled; subsequent keylens
 * just mark it valid
 */
if (possible_ciphers[i].valid == 0) {
salg = kmalloc(sizeof(*salg), GFP_KERNEL);
if (!salg)
return -ENOMEM;
salg->mode = &possible_ciphers[i];
salg->dev[0] = &spacc_pdev->dev;
rc = spacc_register_cipher(salg, i);
if (rc < 0) {
kfree(salg);
continue;
}
dev_dbg(&spacc_pdev->dev, "registered %s\n",
possible_ciphers[i].name);
}
registered++;
possible_ciphers[i].valid = 1;
}
}
return registered;
}
int spacc_unregister_cipher_algs(void)
{
struct spacc_alg *salg, *tmp;
mutex_lock(&spacc_cipher_alg_mutex);
list_for_each_entry_safe(salg, tmp, &spacc_cipher_alg_list, list) {
crypto_unregister_skcipher(&salg->alg.skcipher);
list_del(&salg->list);
kfree(salg);
}
mutex_unlock(&spacc_cipher_alg_mutex);
return 0;
}