Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2025-01-21 05:14:52 +08:00)
crypto: qat - fix spelling mistakes from 'bufer' to 'buffer'
Fix spelling mistakes from 'bufer' to 'buffer' in qat_common. Also fix
the indentation issues caused by the spelling change.

Signed-off-by: Meadhbh Fitzpatrick <meadhbh.fitzpatrick@intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 00bef64ac3
commit 692ed5d4b2
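The indentation fix the message mentions follows from kernel coding style: continuation arguments are aligned with the opening parenthesis of the call. Since 'buffers' is one character longer than 'bufers', every aligned continuation line shifts one column right. A before/after fragment taken from the diff below illustrates this:

	/* before: continuation aligned under the old, shorter open paren */
	bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
					      sg->length,
					      bufl_dma_dir);

	/* after: 'buffers' widens the call by one column, so the
	 * continuation arguments move with it
	 */
	bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
					       sg->length,
					       bufl_dma_dir);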
@@ -37,7 +37,7 @@
 #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
 #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
 
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
 #define ADF_RING_SIZE_BYTES_MIN(SIZE) \
 	((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
 	ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
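As a sanity check on the macro arithmetic, a minimal sketch, assuming ADF_RING_SIZE_4K carries the encoded value 0x06 as defined elsewhere in the same header (the encoding is not part of this hunk):

	#include <assert.h>

	#define ADF_RING_SIZE_4K 0x06	/* assumed encoding, not shown in this hunk */
	#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
	#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
		((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
		ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)

	/* (1 << (0x06 - 1)) << 7 == 32 << 7 == 4096 bytes */
	static_assert(ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) == 4096,
		      "encoded 4K ring size expands to 4096 bytes");
	/* requests below 4 KB are clamped up to the minimum ring size */
	static_assert(ADF_RING_SIZE_BYTES_MIN(1024) == 4096,
		      "small requests are clamped to the minimum");
	/* larger requests pass through unchanged */
	static_assert(ADF_RING_SIZE_BYTES_MIN(8192) == 8192,
		      "large requests pass through");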
@@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	for (i = 0; i < bl->num_bufs; i++)
-		dma_unmap_single(dev, bl->bufers[i].addr,
-				 bl->bufers[i].len, bl_dma_dir);
+		dma_unmap_single(dev, bl->buffers[i].addr,
+				 bl->buffers[i].len, bl_dma_dir);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 
@@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 
 	if (blp != blpout) {
 		for (i = 0; i < blout->num_mapped_bufs; i++) {
-			dma_unmap_single(dev, blout->bufers[i].addr,
-					 blout->bufers[i].len,
+			dma_unmap_single(dev, blout->buffers[i].addr,
+					 blout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
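The direction handling in this free path is worth spelling out: unmapping must use the same direction the buffer was originally mapped with. A minimal sketch of the choice, using the enum values from <linux/dma-direction.h>; the helper name src_dma_dir is hypothetical and not in the driver:

	#include <stdbool.h>

	/* values as defined in <linux/dma-direction.h> */
	enum dma_data_direction {
		DMA_BIDIRECTIONAL = 0,
		DMA_TO_DEVICE = 1,
		DMA_FROM_DEVICE = 2,
	};

	/* Hypothetical helper restating the ternary above: with distinct
	 * source and destination lists (out-of-place), the source is
	 * input-only and maps TO_DEVICE; with a shared list (in-place),
	 * the result is written back into the same buffers, so
	 * BIDIRECTIONAL is required.
	 */
	static enum dma_data_direction src_dma_dir(bool out_of_place)
	{
		return out_of_place ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	}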
@@ -63,7 +63,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	size_t sz_out, sz = struct_size(bufl, buffers, n);
 	int node = dev_to_node(&GET_DEV(accel_dev));
 	int bufl_dma_dir;
 
@@ -86,7 +86,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
 	for_each_sg(sgl, sg, n, i) {
 		int y = sg_nctr;
@@ -94,11 +94,11 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 		if (!sg->length)
 			continue;
 
-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      bufl_dma_dir);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       bufl_dma_dir);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_in;
 		sg_nctr++;
 	}
@@ -111,12 +111,12 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	buf->sz = sz;
 	/* Handle out of place operation */
 	if (sgl != sglout) {
-		struct qat_alg_buf *bufers;
+		struct qat_alg_buf *buffers;
 		int extra_buff = extra_dst_buff ? 1 : 0;
 		int n_sglout = sg_nents(sglout);
 
 		n = n_sglout + extra_buff;
-		sz_out = struct_size(buflout, bufers, n);
+		sz_out = struct_size(buflout, buffers, n);
 		sg_nctr = 0;
 
 		if (n > QAT_MAX_BUFF_DESC) {
@@ -129,9 +129,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			buf->sgl_dst_valid = true;
 		}
 
-		bufers = buflout->bufers;
+		buffers = buflout->buffers;
 		for (i = 0; i < n; i++)
-			bufers[i].addr = DMA_MAPPING_ERROR;
+			buffers[i].addr = DMA_MAPPING_ERROR;
 
 		for_each_sg(sglout, sg, n_sglout, i) {
 			int y = sg_nctr;
@@ -139,17 +139,17 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			if (!sg->length)
 				continue;
 
-			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-							sg->length,
-							DMA_FROM_DEVICE);
-			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+			buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+							 sg->length,
+							 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
 				goto err_out;
-			bufers[y].len = sg->length;
+			buffers[y].len = sg->length;
 			sg_nctr++;
 		}
 		if (extra_buff) {
-			bufers[sg_nctr].addr = extra_dst_buff;
-			bufers[sg_nctr].len = sz_extra_dst_buff;
+			buffers[sg_nctr].addr = extra_dst_buff;
+			buffers[sg_nctr].len = sz_extra_dst_buff;
 		}
 
 		buflout->num_bufs = sg_nctr;
@@ -174,11 +174,11 @@ err_out:
 
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++) {
-		if (buflout->bufers[i].addr == extra_dst_buff)
+		if (buflout->buffers[i].addr == extra_dst_buff)
 			break;
-		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
-			dma_unmap_single(dev, buflout->bufers[i].addr,
-					 buflout->bufers[i].len,
+		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+			dma_unmap_single(dev, buflout->buffers[i].addr,
+					 buflout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	}
 
@@ -191,9 +191,9 @@ err_in:
 
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 bufl_dma_dir);
 
 	if (!buf->sgl_src_valid)
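The err_in/err_out paths rely on the sentinel pattern visible throughout this diff: every descriptor address is pre-set to DMA_MAPPING_ERROR (the all-ones dma_addr_t from <linux/dma-mapping.h>) before mapping, so the unwind loop can walk the whole array and skip entries that were never mapped. A compact sketch; unwind() and unmap_one() are hypothetical names used only for illustration:

	#include <stdint.h>

	typedef uint64_t dma_addr_t;
	#define DMA_MAPPING_ERROR (~(dma_addr_t)0)	/* kernel sentinel value */

	struct buf {
		dma_addr_t addr;
		uint32_t len;
	};

	/* Hypothetical helper mirroring the unwind loops above: entries
	 * still holding the sentinel were never mapped and are skipped.
	 */
	static void unwind(struct buf *b, int n,
			   void (*unmap_one)(dma_addr_t addr, uint32_t len))
	{
		for (int i = 0; i < n; i++)
			if (b[i].addr != DMA_MAPPING_ERROR)
				unmap_one(b[i].addr, b[i].len);
	}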
@@ -231,9 +231,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
 	int i;
 
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bl->bufers[i].addr))
-			dma_unmap_single(dev, bl->bufers[i].addr,
-					 bl->bufers[i].len, DMA_FROM_DEVICE);
+		if (!dma_mapping_error(dev, bl->buffers[i].addr))
+			dma_unmap_single(dev, bl->buffers[i].addr,
+					 bl->buffers[i].len, DMA_FROM_DEVICE);
 }
 
 static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
@@ -248,13 +248,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 	size_t sz;
 
 	n = sg_nents(sgl);
-	sz = struct_size(bufl, bufers, n);
+	sz = struct_size(bufl, buffers, n);
 	bufl = kzalloc_node(sz, GFP_KERNEL, node);
 	if (unlikely(!bufl))
 		return -ENOMEM;
 
 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;
 
 	sg_nctr = 0;
 	for_each_sg(sgl, sg, n, i) {
@@ -263,11 +263,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 		if (!sg->length)
 			continue;
 
-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      DMA_FROM_DEVICE);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       DMA_FROM_DEVICE);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_map;
 		sg_nctr++;
 	}
@@ -280,9 +280,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 
 err_map:
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	kfree(bufl);
 	*bl = NULL;
@@ -351,7 +351,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
 	if (ret)
 		return ret;
 
-	new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
 
 	/* Map new firmware SGL descriptor */
 	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
@@ -18,7 +18,7 @@ struct qat_alg_buf_list {
 	u64 resrvd;
 	u32 num_bufs;
 	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
+	struct qat_alg_buf buffers[];
 } __packed;
 
 struct qat_alg_fixed_buf_list {
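The struct_size(bufl, buffers, n) calls throughout the diff size allocations of this flexible-array struct; the kernel helper comes from <linux/overflow.h> and saturates rather than overflows. A userspace sketch of the equivalent (non-overflowing) arithmetic; the field layout of struct qat_alg_buf is an assumption, since it is not part of this diff:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct qat_alg_buf {		/* assumed layout; not shown in this diff */
		uint32_t len;
		uint32_t resrvd;
		uint64_t addr;
	} __attribute__((packed));

	struct qat_alg_buf_list {
		uint64_t resrvd;
		uint32_t num_bufs;
		uint32_t num_mapped_bufs;
		struct qat_alg_buf buffers[];	/* renamed from 'bufers' */
	} __attribute__((packed));

	int main(void)
	{
		size_t n = 4;
		/* struct_size(bufl, buffers, n) computes, overflow-checked: */
		size_t sz = sizeof(struct qat_alg_buf_list) +
			    n * sizeof(struct qat_alg_buf);
		struct qat_alg_buf_list *bufl = calloc(1, sz);

		free(bufl);
		/* 16-byte header plus four 16-byte descriptors */
		return sz == 16 + 4 * 16 ? 0 : 1;
	}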