scsi: smartpqi: Refactor scatterlist code

Factor out code common to all scatter-gather list building to prepare for
new AIO functionality. AIO (Accelerated I/O) requests go directly to disk.

No functional changes.

Link: https://lore.kernel.org/r/161549372147.25025.9706613054649682229.stgit@brunhilda
Reviewed-by: Scott Benesh <scott.benesh@microchip.com>
Reviewed-by: Mike McGowen <mike.mcgowen@microchip.com>
Reviewed-by: Scott Teel <scott.teel@microchip.com>
Reviewed-by: Kevin Barnett <kevin.barnett@microchip.com>
Reviewed-by: Martin Wilck <mwilck@suse.com>
Signed-off-by: Don Brace <don.brace@microchip.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in:
Don Brace 2021-03-11 14:15:21 -06:00 committed by Martin K. Petersen
parent 281a817f23
commit 1a22bc4bee

View File

@ -4857,16 +4857,52 @@ static inline void pqi_set_sg_descriptor(
put_unaligned_le32(0, &sg_descriptor->flags);
}
/*
 * Copy the entries of a scatterlist into an IU's SG descriptor array,
 * spilling into the request's chain buffer when the list does not fit
 * in a single IU.
 *
 * @sg_descriptor: first in-IU SG descriptor slot to fill.
 * @sg: head of the scatterlist to translate.
 * @sg_count: number of mapped scatterlist entries; callers are expected
 *	to pass sg_count > 0.
 * @io_request: supplies the pre-allocated chain buffer and its DMA handle.
 * @max_sg_per_iu: total descriptor slots available in the IU, including
 *	the slot consumed by the chain marker.
 * @chained: set true if the chain buffer was used, false otherwise.
 *
 * Returns the number of descriptors occupying the IU itself; descriptors
 * that spilled into the chain buffer are represented by the single
 * CISS_SG_CHAIN entry, so they do not add to this count.
 */
static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
	int max_sg_per_iu, bool *chained)
{
	int i;
	unsigned int num_sg_in_iu;

	*chained = false;
	i = 0;
	num_sg_in_iu = 0;
	max_sg_per_iu--;	/* Subtract 1 to leave room for chain marker. */

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		/*
		 * Once we have chained, further descriptors live in the
		 * chain buffer and must not be counted against the IU.
		 */
		if (!*chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			/*
			 * IU is full: turn the last in-IU slot into a chain
			 * descriptor pointing at the chain buffer.  At this
			 * point num_sg_in_iu == i, so (sg_count -
			 * num_sg_in_iu) is the number of entries that will
			 * land in the chain buffer.
			 */
			put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags);
			*chained = true;
			/* The chain marker itself occupies one IU slot. */
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	/* Mark the final descriptor (in IU or chain buffer) as the last. */
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return num_sg_in_iu;
}
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@ -4882,36 +4918,10 @@ static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
chained = false;
num_sg_in_iu = 0;
i = 0;
while (1) {
pqi_set_sg_descriptor(sg_descriptor, sg);
if (!chained)
num_sg_in_iu++;
i++;
if (i == sg_count)
break;
sg_descriptor++;
if (i == max_sg_per_iu) {
put_unaligned_le64(
(u64)io_request->sg_chain_buffer_dma_handle,
&sg_descriptor->address);
put_unaligned_le32((sg_count - num_sg_in_iu)
* sizeof(*sg_descriptor),
&sg_descriptor->length);
put_unaligned_le32(CISS_SG_CHAIN,
&sg_descriptor->flags);
chained = true;
num_sg_in_iu++;
sg_descriptor = io_request->sg_chain_buffer;
}
sg = sg_next(sg);
}
num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
ctrl_info->max_sg_per_iu, &chained);
put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
@ -4925,12 +4935,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
struct pqi_io_request *io_request)
{
int i;
u16 iu_length;
int sg_count;
bool chained;
unsigned int num_sg_in_iu;
unsigned int max_sg_per_iu;
struct scatterlist *sg;
struct pqi_sg_descriptor *sg_descriptor;
@ -4947,35 +4955,10 @@ static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
sg = scsi_sglist(scmd);
sg_descriptor = request->sg_descriptors;
max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
chained = false;
i = 0;
while (1) {
pqi_set_sg_descriptor(sg_descriptor, sg);
if (!chained)
num_sg_in_iu++;
i++;
if (i == sg_count)
break;
sg_descriptor++;
if (i == max_sg_per_iu) {
put_unaligned_le64(
(u64)io_request->sg_chain_buffer_dma_handle,
&sg_descriptor->address);
put_unaligned_le32((sg_count - num_sg_in_iu)
* sizeof(*sg_descriptor),
&sg_descriptor->length);
put_unaligned_le32(CISS_SG_CHAIN,
&sg_descriptor->flags);
chained = true;
num_sg_in_iu++;
sg_descriptor = io_request->sg_chain_buffer;
}
sg = sg_next(sg);
}
num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
ctrl_info->max_sg_per_iu, &chained);
put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
request->partial = chained;
iu_length += num_sg_in_iu * sizeof(*sg_descriptor);