linux-next/drivers/crypto/caam/sg_sw_qm.h
Horia Geantă 059d73eea6 crypto: caam - use len instead of nents for building HW S/G table
Currently, conversion of the SW S/G table into the HW S/G layout relies on
the nents returned by sg_nents_for_len(sg, len).
However, this leaves the possibility of the HW S/G referencing more data
than needed: since the buffer length in HW S/G entries is filled in using
sg_dma_len(sg), the last entry in the HW S/G table might have a length
that is bigger than needed for the crypto request.

This way of converting the S/G table is fine, unless more entries have to
be appended to the HW S/G table after the conversion.
In that case, the crypto engine would access the extra data referenced by
the oversized last entry, instead of advancing to the appended entry in
the S/G table.
No such case exists today, but the upcoming implementation of
IV update for skcipher algorithms needs to add an S/G entry after the
req->dst S/G (corresponding to the output IV).

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-06-20 14:18:33 +08:00
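
For context, a minimal sketch of a hypothetical caller, assuming the helpers
defined in the file below; the function name and parameters are illustrative
and not part of the commit. Because sg_to_qm_sg() clamps the last entry to
the remaining len, the engine consumes exactly dst_len bytes of dst and then
advances to the appended IV entry:

static void example_append_output_iv(struct scatterlist *dst, int dst_len,
                                     struct qm_sg_entry *table,
                                     dma_addr_t iv_dma, int ivsize)
{
        struct qm_sg_entry *last;

        /* convert dst without setting the final bit */
        last = sg_to_qm_sg(dst, dst_len, table, 0);

        /* append the output IV entry and mark it as final */
        dma_to_qm_sg_one_last(last + 1, iv_dma, ivsize, 0);
}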


/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017 NXP
 */

#ifndef __SG_SW_QM_H
#define __SG_SW_QM_H

#include <soc/fsl/qman.h>

#include "regs.h"

/* fill the fields common to all HW S/G entry types; length is set later */
static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
                                  u16 offset)
{
        qm_sg_entry_set64(qm_sg_ptr, dma);
        qm_sg_ptr->__reserved2 = 0;
        qm_sg_ptr->bpid = 0;
        qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
}

/* set up one regular (data) HW S/G entry */
static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
                                    dma_addr_t dma, u32 len, u16 offset)
{
        __dma_to_qm_sg(qm_sg_ptr, dma, offset);
        qm_sg_entry_set_len(qm_sg_ptr, len);
}

/* set up one data entry and mark it as the final entry in the table */
static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
                                         dma_addr_t dma, u32 len, u16 offset)
{
        __dma_to_qm_sg(qm_sg_ptr, dma, offset);
        qm_sg_entry_set_f(qm_sg_ptr, len);
}

/* set up one extension entry - its address points to another S/G table */
static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
                                        dma_addr_t dma, u32 len, u16 offset)
{
        __dma_to_qm_sg(qm_sg_ptr, dma, offset);
        qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
}

/* set up one extension entry that is also the final entry in the table */
static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
                                             dma_addr_t dma, u32 len,
                                             u16 offset)
{
        __dma_to_qm_sg(qm_sg_ptr, dma, offset);
        qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
                                     (len & QM_SG_LEN_MASK));
}

/*
 * convert scatterlist to h/w link table format
 * but does not set the final bit; instead, returns the last entry
 */
static inline struct qm_sg_entry *
sg_to_qm_sg(struct scatterlist *sg, int len,
            struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
        int ent_len;

        while (len) {
                ent_len = min_t(int, sg_dma_len(sg), len);

                dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
                                 offset);
                qm_sg_ptr++;
                sg = sg_next(sg);
                len -= ent_len;
        }
        return qm_sg_ptr - 1;
}

/*
 * convert scatterlist to h/w link table format
 * scatterlist must have been previously dma mapped
 */
static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
                                    struct qm_sg_entry *qm_sg_ptr, u16 offset)
{
        qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
        qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
}

#endif /* __SG_SW_QM_H */
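
As a usage note, a hedged sketch of how a driver might combine DMA mapping
with the one-shot conversion above; example_build_hw_sg() and its error
handling are assumptions for illustration (using sg_nents_for_len() and
dma_map_sg() from the scatterlist and DMA-mapping APIs), not an API from
this file:

static int example_build_hw_sg(struct device *dev, struct scatterlist *sg,
                               int len, struct qm_sg_entry *table)
{
        int nents = sg_nents_for_len(sg, len);

        if (nents < 0)
                return nents;

        /* the conversion helpers expect a DMA-mapped scatterlist */
        if (!dma_map_sg(dev, sg, nents, DMA_TO_DEVICE))
                return -ENOMEM;

        sg_to_qm_sg_last(sg, len, table, 0);

        return 0;
}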