crypto: chelsio - switch to skcipher API
Commit 7a7ffe65c8 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API, which is supposed to
replace both blkcipher and ablkcipher. While all consumers of the API were
converted long ago, some producers of ablkciphers remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.
So switch this driver to the skcipher API, allowing us to finally drop the
ablkcipher code in the near future.
Cc: Atul Gupta <atul.gupta@chelsio.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent ac0d3d130f
commit 7cea6d3e01
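In practical terms, each cipher is now described by a struct skcipher_alg and registered with crypto_register_skcipher() instead of a crypto_alg/ablkcipher registration. The following is a minimal, illustrative sketch of the new-style entry, mirroring the converted cbc(aes) template in the diff below; the chcr_* callbacks are the driver's own handlers (bodies omitted here) and the block is not part of the patch itself:

/* Illustrative sketch only: field layout mirrors the converted driver_algs[]
 * entry for cbc(aes); the chcr_* callbacks and context structs are assumed
 * to be defined elsewhere in the driver. */
#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

static struct skcipher_alg chcr_cbc_aes_sketch = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-chcr",
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_ctxsize	= sizeof(struct chcr_context) + sizeof(struct ablk_ctx),
	.base.cra_module	= THIS_MODULE,

	.init			= chcr_init_tfm,	/* replaces .cra_init */
	.exit			= chcr_exit_tfm,	/* replaces .cra_exit */
	.min_keysize		= AES_MIN_KEY_SIZE,	/* was .cra_u.ablkcipher.min_keysize */
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= chcr_aes_cbc_setkey,
	.encrypt		= chcr_aes_encrypt,
	.decrypt		= chcr_aes_decrypt,
};

/* Registration and removal switch from crypto_register_alg()/crypto_unregister_alg()
 * to the type-specific helpers:
 *	err = crypto_register_skcipher(&chcr_cbc_aes_sketch);
 *	crypto_unregister_skcipher(&chcr_cbc_aes_sketch);
 */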
@@ -93,7 +93,7 @@ static u32 round_constant[11] = {
 	0x1B000000, 0x36000000, 0x6C000000
 };
 
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err);
 
 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
@@ -568,11 +568,11 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
 	}
 }
 
-static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
+static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct chcr_alg_template *chcr_crypto_alg =
-		container_of(alg, struct chcr_alg_template, alg.crypto);
+		container_of(alg, struct chcr_alg_template, alg.skcipher);
 
 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 }
@@ -757,14 +757,14 @@ static inline void create_wreq(struct chcr_context *ctx,
  */
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct ulptx_sgl *ulptx;
-	struct chcr_blkcipher_req_ctx *reqctx =
-		ablkcipher_request_ctx(wrparam->req);
+	struct chcr_skcipher_req_ctx *reqctx =
+		skcipher_request_ctx(wrparam->req);
 	unsigned int temp = 0, transhdr_len, dst_size;
 	int error;
 	int nents;
@@ -807,9 +807,9 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 
 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
-	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    (!(get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR)) &&
-	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    (!(get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 	} else {
@@ -843,7 +843,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	if (reqctx->op && (ablkctx->ciph_mode ==
 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
 		sg_pcopy_to_buffer(wrparam->req->src,
-			sg_nents(wrparam->req->src), wrparam->req->info, 16,
+			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
 
 	return skb;
@@ -866,11 +866,11 @@ static inline int chcr_keyctx_ck_size(unsigned int keylen)
 
 	return ck_size;
 }
-static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 				       const u8 *key,
 				       unsigned int keylen)
 {
-	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 	int err = 0;
 
@@ -886,7 +886,7 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
 	return err;
 }
 
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
@@ -912,13 +912,13 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
 }
 
-static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
 			       const u8 *key,
 			       unsigned int keylen)
 {
@@ -943,13 +943,13 @@ static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
 
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
 }
 
-static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
 				   const u8 *key,
 				   unsigned int keylen)
 {
@@ -981,7 +981,7 @@ static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
 
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
@@ -1017,12 +1017,12 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
 	return bytes;
 }
 
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
 			     u32 isfinal)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_aes_ctx aes;
 	int ret, i;
 	u8 *key;
@@ -1051,16 +1051,16 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
 	return 0;
 }
 
-static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+static int chcr_update_cipher_iv(struct skcipher_request *req,
 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(tfm);
 	int ret = 0;
 
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
-		ctr_add_iv(iv, req->info, (reqctx->processed /
+		ctr_add_iv(iv, req->iv, (reqctx->processed /
 			   AES_BLOCK_SIZE));
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
@@ -1071,7 +1071,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
 		if (reqctx->op)
 			/*Updated before sending last WR*/
-			memcpy(iv, req->info, AES_BLOCK_SIZE);
+			memcpy(iv, req->iv, AES_BLOCK_SIZE);
 		else
 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
 	}
@@ -1085,16 +1085,16 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
  * for subsequent update requests
  */
 
-static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+static int chcr_final_cipher_iv(struct skcipher_request *req,
 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(tfm);
 	int ret = 0;
 
 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
-		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
 				   AES_BLOCK_SIZE));
 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
 		ret = chcr_update_tweak(req, iv, 1);
@@ -1108,25 +1108,25 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
 
 }
 
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				   unsigned char *input, int err)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct sk_buff *skb;
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct cipher_wr_param wrparam;
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	int bytes;
 
 	if (err)
 		goto unmap;
-	if (req->nbytes == reqctx->processed) {
+	if (req->cryptlen == reqctx->processed) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
-		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
 		goto complete;
 	}
 
@@ -1134,13 +1134,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  reqctx->src_ofst, reqctx->dst_ofst);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes = req->nbytes - reqctx->processed;
+		if ((bytes + reqctx->processed) >= req->cryptlen)
+			bytes = req->cryptlen - reqctx->processed;
 		else
 			bytes = rounddown(bytes, 16);
 	} else {
 		/*CTR mode counter overfloa*/
-		bytes = req->nbytes - reqctx->processed;
+		bytes = req->cryptlen - reqctx->processed;
 	}
 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
 	if (err)
@@ -1153,13 +1153,13 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 					    req->base.flags,
 					    req->src,
 					    req->dst,
-					    req->nbytes,
-					    req->info,
+					    req->cryptlen,
+					    req->iv,
 					    reqctx->op);
 		goto complete;
 	}
 
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR)
 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
 	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
@@ -1185,33 +1185,33 @@ complete:
 	return err;
 }
 
-static int process_cipher(struct ablkcipher_request *req,
+static int process_cipher(struct skcipher_request *req,
 				  unsigned short qid,
 				  struct sk_buff **skb,
 				  unsigned short op_type)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
-	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
 	struct cipher_wr_param wrparam;
 	int bytes, err = -EINVAL;
 
 	reqctx->processed = 0;
-	if (!req->info)
+	if (!req->iv)
 		goto error;
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-	    (req->nbytes == 0) ||
-	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+	    (req->cryptlen == 0) ||
+	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
-		       ablkctx->enckey_len, req->nbytes, ivsize);
+		       ablkctx->enckey_len, req->cryptlen, ivsize);
 		goto error;
 	}
 
 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 	if (err)
 		goto error;
-	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
 					    AES_MIN_KEY_SIZE +
 					    sizeof(struct cpl_rx_phys_dsgl) +
 			/*Min dsgl size*/
@@ -1219,14 +1219,14 @@ static int process_cipher(struct ablkcipher_request *req,
 			/* Can be sent as Imm*/
 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
 
-		dnents = sg_nents_xlen(req->dst, req->nbytes,
+		dnents = sg_nents_xlen(req->dst, req->cryptlen,
 				       CHCR_DST_SG_SIZE, 0);
 		phys_dsgl = get_space_for_phys_dsgl(dnents);
 		kctx_len = roundup(ablkctx->enckey_len, 16);
 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
+		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
 			SGE_MAX_WR_LEN;
-		bytes = IV + req->nbytes;
+		bytes = IV + req->cryptlen;
 
 	} else {
 		reqctx->imm = 0;
@@ -1236,21 +1236,21 @@ static int process_cipher(struct ablkcipher_request *req,
 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  0, 0);
-		if ((bytes + reqctx->processed) >= req->nbytes)
-			bytes = req->nbytes - reqctx->processed;
+		if ((bytes + reqctx->processed) >= req->cryptlen)
+			bytes = req->cryptlen - reqctx->processed;
 		else
 			bytes = rounddown(bytes, 16);
 	} else {
-		bytes = req->nbytes;
+		bytes = req->cryptlen;
 	}
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR) {
-		bytes = adjust_ctr_overflow(req->info, bytes);
+		bytes = adjust_ctr_overflow(req->iv, bytes);
 	}
-	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	if (get_cryptoalg_subtype(tfm) ==
 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
-		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
 		       CTR_RFC3686_IV_SIZE);
 
 		/* initialize counter portion of counter block */
@@ -1259,7 +1259,7 @@ static int process_cipher(struct ablkcipher_request *req,
 
 	} else {
 
-		memcpy(reqctx->iv, req->info, IV);
+		memcpy(reqctx->iv, req->iv, IV);
 	}
 	if (unlikely(bytes == 0)) {
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
@@ -1268,7 +1268,7 @@ static int process_cipher(struct ablkcipher_request *req,
 					   req->base.flags,
 					   req->src,
 					   req->dst,
-					   req->nbytes,
+					   req->cryptlen,
 					   reqctx->iv,
 					   op_type);
 		goto error;
@@ -1296,9 +1296,9 @@ error:
 	return err;
 }
 
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+static int chcr_aes_encrypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
 	int err, isfull = 0;
@@ -1329,9 +1329,9 @@ error:
 	return err;
 }
 
-static int chcr_aes_decrypt(struct ablkcipher_request *req)
+static int chcr_aes_decrypt(struct skcipher_request *req)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
@@ -1398,27 +1398,28 @@ out:
 	return err;
 }
 
-static int chcr_cra_init(struct crypto_tfm *tfm)
+static int chcr_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
 				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
-		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
 
-	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
-	return chcr_device_init(crypto_tfm_ctx(tfm));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+
+	return chcr_device_init(ctx);
 }
 
-static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
 {
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
 	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
@@ -1427,17 +1428,17 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
 				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
-		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
-	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
-	return chcr_device_init(crypto_tfm_ctx(tfm));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	return chcr_device_init(ctx);
 }
 
 
-static void chcr_cra_exit(struct crypto_tfm *tfm)
+static void chcr_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
 	crypto_free_sync_skcipher(ablkctx->sw_cipher);
@@ -2056,8 +2057,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
 		break;
 
-	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+	case CRYPTO_ALG_TYPE_SKCIPHER:
+		chcr_handle_cipher_resp(skcipher_request_cast(req),
 					input, err);
 		break;
 	case CRYPTO_ALG_TYPE_AHASH:
@@ -2148,7 +2149,7 @@ out:
 	return err;
 }
 
-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
 			       unsigned int key_len)
 {
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
@@ -2172,7 +2173,7 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
 
 	return err;
@@ -2576,12 +2577,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
 			     void *ulptx,
 			     struct cipher_wr_param *wrparam)
 {
 	struct ulptx_walk ulp_walk;
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	u8 *buf = ulptx;
 
 	memcpy(buf, reqctx->iv, IV);
@@ -2599,13 +2600,13 @@ void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
 	}
 }
 
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
 			     struct cpl_rx_phys_dsgl *phys_cpl,
 			     struct cipher_wr_param *wrparam,
 			     unsigned short qid)
 {
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct chcr_context *ctx = c_ctx(tfm);
 	struct dsgl_walk dsgl_walk;
 
@@ -2680,7 +2681,7 @@ void chcr_hash_dma_unmap(struct device *dev,
 }
 
 int chcr_cipher_dma_map(struct device *dev,
-			struct ablkcipher_request *req)
+			struct skcipher_request *req)
 {
 	int error;
 
@@ -2709,7 +2710,7 @@ err:
 }
 
 void chcr_cipher_dma_unmap(struct device *dev,
-			   struct ablkcipher_request *req)
+			   struct skcipher_request *req)
 {
 	if (req->src == req->dst) {
 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
@@ -3712,82 +3713,76 @@ static int chcr_aead_decrypt(struct aead_request *req)
 static struct chcr_alg_template driver_algs[] = {
 	/* AES-CBC */
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
+		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
 		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "cbc(aes)",
-			.cra_driver_name	= "cbc-aes-chcr",
-			.cra_blocksize		= AES_BLOCK_SIZE,
-			.cra_init		= chcr_cra_init,
-			.cra_exit		= chcr_cra_exit,
-			.cra_u.ablkcipher	= {
+		.alg.skcipher = {
+			.base.cra_name		= "cbc(aes)",
+			.base.cra_driver_name	= "cbc-aes-chcr",
+			.base.cra_blocksize	= AES_BLOCK_SIZE,
+
+			.init			= chcr_init_tfm,
+			.exit			= chcr_exit_tfm,
 			.min_keysize		= AES_MIN_KEY_SIZE,
 			.max_keysize		= AES_MAX_KEY_SIZE,
 			.ivsize			= AES_BLOCK_SIZE,
 			.setkey			= chcr_aes_cbc_setkey,
 			.encrypt		= chcr_aes_encrypt,
 			.decrypt		= chcr_aes_decrypt,
 			}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
+		.is_registered = 0,
+		.alg.skcipher = {
+			.base.cra_name		= "xts(aes)",
+			.base.cra_driver_name	= "xts-aes-chcr",
+			.base.cra_blocksize	= AES_BLOCK_SIZE,
+
+			.init			= chcr_init_tfm,
+			.exit			= chcr_exit_tfm,
+			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
+			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
+			.ivsize			= AES_BLOCK_SIZE,
+			.setkey			= chcr_aes_xts_setkey,
+			.encrypt		= chcr_aes_encrypt,
+			.decrypt		= chcr_aes_decrypt,
+			}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+		.is_registered = 0,
+		.alg.skcipher = {
+			.base.cra_name		= "ctr(aes)",
+			.base.cra_driver_name	= "ctr-aes-chcr",
+			.base.cra_blocksize	= 1,
+
+			.init			= chcr_init_tfm,
+			.exit			= chcr_exit_tfm,
+			.min_keysize		= AES_MIN_KEY_SIZE,
+			.max_keysize		= AES_MAX_KEY_SIZE,
+			.ivsize			= AES_BLOCK_SIZE,
+			.setkey			= chcr_aes_ctr_setkey,
+			.encrypt		= chcr_aes_encrypt,
+			.decrypt		= chcr_aes_decrypt,
 		}
 	},
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
-		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "xts(aes)",
-			.cra_driver_name	= "xts-aes-chcr",
-			.cra_blocksize		= AES_BLOCK_SIZE,
-			.cra_init		= chcr_cra_init,
-			.cra_exit		= NULL,
-			.cra_u .ablkcipher = {
-				.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-				.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-				.ivsize		= AES_BLOCK_SIZE,
-				.setkey		= chcr_aes_xts_setkey,
-				.encrypt	= chcr_aes_encrypt,
-				.decrypt	= chcr_aes_decrypt,
-				}
-			}
-	},
-	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
-		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "ctr(aes)",
-			.cra_driver_name	= "ctr-aes-chcr",
-			.cra_blocksize		= 1,
-			.cra_init		= chcr_cra_init,
-			.cra_exit		= chcr_cra_exit,
-			.cra_u.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE,
-				.ivsize		= AES_BLOCK_SIZE,
-				.setkey		= chcr_aes_ctr_setkey,
-				.encrypt	= chcr_aes_encrypt,
-				.decrypt	= chcr_aes_decrypt,
-				}
-			}
-	},
-	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+		.type = CRYPTO_ALG_TYPE_SKCIPHER |
 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
 		.is_registered = 0,
-		.alg.crypto = {
-			.cra_name		= "rfc3686(ctr(aes))",
-			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
-			.cra_blocksize		= 1,
-			.cra_init		= chcr_rfc3686_init,
-			.cra_exit		= chcr_cra_exit,
-			.cra_u.ablkcipher = {
-				.min_keysize	= AES_MIN_KEY_SIZE +
-					CTR_RFC3686_NONCE_SIZE,
-				.max_keysize	= AES_MAX_KEY_SIZE +
-					CTR_RFC3686_NONCE_SIZE,
-				.ivsize		= CTR_RFC3686_IV_SIZE,
-				.setkey		= chcr_aes_rfc3686_setkey,
-				.encrypt	= chcr_aes_encrypt,
-				.decrypt	= chcr_aes_decrypt,
-				}
+		.alg.skcipher = {
+			.base.cra_name		= "rfc3686(ctr(aes))",
+			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
+			.base.cra_blocksize	= 1,
+
+			.init			= chcr_rfc3686_init,
+			.exit			= chcr_exit_tfm,
+			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+			.ivsize			= CTR_RFC3686_IV_SIZE,
+			.setkey			= chcr_aes_rfc3686_setkey,
+			.encrypt		= chcr_aes_encrypt,
+			.decrypt		= chcr_aes_decrypt,
 		}
 	},
 	/* SHA */
@@ -4254,10 +4249,10 @@ static int chcr_unregister_alg(void)
 
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
-		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		case CRYPTO_ALG_TYPE_SKCIPHER:
 			if (driver_algs[i].is_registered)
-				crypto_unregister_alg(
-						&driver_algs[i].alg.crypto);
+				crypto_unregister_skcipher(
+						&driver_algs[i].alg.skcipher);
 			break;
 		case CRYPTO_ALG_TYPE_AEAD:
 			if (driver_algs[i].is_registered)
@@ -4293,21 +4288,20 @@ static int chcr_register_alg(void)
 		if (driver_algs[i].is_registered)
 			continue;
 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
-		case CRYPTO_ALG_TYPE_ABLKCIPHER:
-			driver_algs[i].alg.crypto.cra_priority =
+		case CRYPTO_ALG_TYPE_SKCIPHER:
+			driver_algs[i].alg.skcipher.base.cra_priority =
 				CHCR_CRA_PRIORITY;
-			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
-			driver_algs[i].alg.crypto.cra_flags =
-				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
+			driver_algs[i].alg.skcipher.base.cra_flags =
+				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
 				CRYPTO_ALG_NEED_FALLBACK;
-			driver_algs[i].alg.crypto.cra_ctxsize =
+			driver_algs[i].alg.skcipher.base.cra_ctxsize =
 				sizeof(struct chcr_context) +
 				sizeof(struct ablk_ctx);
-			driver_algs[i].alg.crypto.cra_alignmask = 0;
-			driver_algs[i].alg.crypto.cra_type =
-					&crypto_ablkcipher_type;
-			err = crypto_register_alg(&driver_algs[i].alg.crypto);
-			name = driver_algs[i].alg.crypto.cra_driver_name;
+			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
+
+			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
+			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
 			break;
 		case CRYPTO_ALG_TYPE_AEAD:
 			driver_algs[i].alg.aead.base.cra_flags =
@@ -287,7 +287,7 @@ struct hash_wr_param {
 };
 
 struct cipher_wr_param {
-	struct ablkcipher_request *req;
+	struct skcipher_request *req;
 	char *iv;
 	int bytes;
 	unsigned short qid;
@@ -160,9 +160,9 @@ static inline struct chcr_context *a_ctx(struct crypto_aead *tfm)
 	return crypto_aead_ctx(tfm);
 }
 
-static inline struct chcr_context *c_ctx(struct crypto_ablkcipher *tfm)
+static inline struct chcr_context *c_ctx(struct crypto_skcipher *tfm)
 {
-	return crypto_ablkcipher_ctx(tfm);
+	return crypto_skcipher_ctx(tfm);
 }
 
 static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
@@ -285,7 +285,7 @@ struct chcr_ahash_req_ctx {
 	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
 };
 
-struct chcr_blkcipher_req_ctx {
+struct chcr_skcipher_req_ctx {
 	struct sk_buff *skb;
 	struct scatterlist *dstsg;
 	unsigned int processed;
@@ -302,7 +302,7 @@ struct chcr_alg_template {
 	u32 type;
 	u32 is_registered;
 	union {
-		struct crypto_alg crypto;
+		struct skcipher_alg skcipher;
 		struct ahash_alg hash;
 		struct aead_alg aead;
 	} alg;
@@ -321,12 +321,12 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 			   struct cpl_rx_phys_dsgl *phys_cpl,
 			   unsigned short qid);
 void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
-void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+void chcr_add_cipher_src_ent(struct skcipher_request *req,
 			     void *ulptx,
 			     struct cipher_wr_param *wrparam);
-int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
-void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
-void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req);
+void chcr_add_cipher_dst_ent(struct skcipher_request *req,
 			     struct cpl_rx_phys_dsgl *phys_cpl,
 			     struct cipher_wr_param *wrparam,
 			     unsigned short qid);
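Once the driver registers its algorithms with crypto_register_skcipher(), in-kernel users reach them through the generic skcipher request API rather than any ablkcipher-specific calls. A minimal, hypothetical consumer sketch follows (not part of this patch; error handling trimmed and the buffer assumed to be DMA-capable):

/* Hypothetical consumer: encrypts one buffer in place with cbc(aes).
 * "cbc(aes)" may be served by cbc-aes-chcr when the hardware is present,
 * or by any other registered implementation. */
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
				const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/* cryptlen and iv replace the old ablkcipher nbytes/info fields. */
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}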