mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-26 11:34:42 +08:00
scsi: stop passing a gfp_mask argument down the command setup path
There is no reason for ULDs to pass in a flag on how to allocate the S/G lists. While we don't need GFP_ATOMIC for the blk-mq case because we don't hold locks, that decision can be made way down the chain without having to pass a pointless gfp_mask argument. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Bart Van Assche <bvanassche@acm.org> Reviewed-by: Hannes Reinecke <hare@suse.de>
This commit is contained in:
parent
bb3ec62a17
commit
3c356bde19
@@ -588,10 +588,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
 }
 
-static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
-			      gfp_t gfp_mask, bool mq)
+static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 {
 	struct scatterlist *first_chunk = NULL;
+	gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
 	int ret;
 
 	BUG_ON(!nents);
@@ -1077,8 +1077,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	}
 }
 
-static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
-			     gfp_t gfp_mask)
+static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
 {
 	int count;
 
@@ -1086,7 +1085,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
 	 * If sg table allocation fails, requeue request later.
 	 */
 	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
-					gfp_mask, req->mq_ctx != NULL)))
+					req->mq_ctx != NULL)))
 		return BLKPREP_DEFER;
 
 	/*
@@ -1111,7 +1110,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
  *		BLKPREP_DEFER if the failure is retryable
  *		BLKPREP_KILL if the failure is fatal
  */
-int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+int scsi_init_io(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
 	struct request *rq = cmd->request;
@@ -1120,7 +1119,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 
 	BUG_ON(!rq->nr_phys_segments);
 
-	error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
+	error = scsi_init_sgtable(rq, &cmd->sdb);
 	if (error)
 		goto err_exit;
 
@@ -1136,8 +1135,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 			rq->next_rq->special = bidi_sdb;
 		}
 
-		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special,
-					  GFP_ATOMIC);
+		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
 		if (error)
 			goto err_exit;
 	}
@@ -1149,7 +1147,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 		BUG_ON(prot_sdb == NULL);
 		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
-		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) {
+		if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
 		}
@@ -1218,7 +1216,7 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 	 * submit a request without an attached bio.
 	 */
 	if (req->bio) {
-		int ret = scsi_init_io(cmd, GFP_ATOMIC);
+		int ret = scsi_init_io(cmd);
 		if (unlikely(ret))
 			return ret;
 	} else {
@@ -786,7 +786,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
 	 * amount of blocks described by the request.
 	 */
 	blk_add_request_payload(rq, page, len);
-	ret = scsi_init_io(cmd, GFP_ATOMIC);
+	ret = scsi_init_io(cmd);
 	rq->__data_len = nr_bytes;
 
 out:
@@ -880,7 +880,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 	 * knows how much to actually write.
 	 */
 	rq->__data_len = sdp->sector_size;
-	ret = scsi_init_io(cmd, GFP_ATOMIC);
+	ret = scsi_init_io(cmd);
 	rq->__data_len = nr_bytes;
 	return ret;
 }
@@ -914,7 +914,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	int ret;
 	unsigned char protect;
 
-	ret = scsi_init_io(SCpnt, GFP_ATOMIC);
+	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
 	SCpnt = rq->special;
@@ -387,7 +387,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 	struct request *rq = SCpnt->request;
 	int ret;
 
-	ret = scsi_init_io(SCpnt, GFP_ATOMIC);
+	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
 	SCpnt = rq->special;
@@ -163,7 +163,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 				 size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);
 
-extern int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask);
+extern int scsi_init_io(struct scsi_cmnd *cmd);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
Loading…
Reference in New Issue
Block a user