blk_end_request: changing ide normal caller (take 4)
This patch converts the "normal" parts of ide to use the blk_end_request interfaces. Related 'uptodate' arguments are converted to 'error'.

The conversion of 'uptodate' to 'error' is done only for the internal function, __ide_end_request(). ide_end_request() was not changed since it is exported and used by many ide drivers.

With this patch, the blkdev_dequeue_request() in __ide_end_request() is moved into blk_end_request, since blk_end_request takes care of dequeueing the request as below:

	if (!list_empty(&rq->queuelist))
		blkdev_dequeue_request(rq);

In the case of ide:
  o the 'dequeue' variable of __ide_end_request() is 1 only when the request
    is still linked to the queue (i.e. rq->queuelist is not empty)
  o the 'dequeue' variable of __ide_end_request() is 0 only when the request
    has already been removed from the queue (i.e. rq->queuelist is empty)

So blk_end_request handles both cases correctly, even though ide always runs through the code above.

Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent ea6f06f416
commit 5e36bb6ee8
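Before the diff, a minimal sketch of the 'uptodate' -> 'error' mapping described above, written as a standalone user-space C program rather than kernel code; the helper name uptodate_to_error() is made up for illustration and is not part of the patch:

	/*
	 * Standalone illustration (not kernel code) of the 'uptodate' -> 'error'
	 * convention: uptodate > 0 means success, uptodate == 0 is a generic
	 * failure mapped to -EIO, and a negative uptodate is already a negative
	 * errno value and is passed through unchanged.
	 */
	#include <stdio.h>
	#include <errno.h>

	static int uptodate_to_error(int uptodate)	/* hypothetical helper name */
	{
		int error = 0;

		if (uptodate <= 0)
			error = uptodate ? uptodate : -EIO;

		return error;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       uptodate_to_error(1),		/* success       -> 0    */
		       uptodate_to_error(0),		/* generic error -> -EIO */
		       uptodate_to_error(-ENOMEM));	/* errno passthrough     */
		return 0;
	}

This is the same conversion that the second hunk below adds at the top of __ide_end_request().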
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 				BUG();
 		} else {
 			spin_lock_irqsave(&ide_lock, flags);
-			end_that_request_chunk(failed, 0,
-					       failed->data_len);
-			end_that_request_last(failed, 0);
+			if (__blk_end_request(failed, -EIO,
+					      failed->data_len))
+				BUG();
 			spin_unlock_irqrestore(&ide_lock, flags);
 		}
 	} else
@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, unsigned int nr_bytes, int dequeue)
 {
 	int ret = 1;
+	int error = 0;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
 
 	/*
 	 * if failfast is set on a request, override number of sectors and
 	 * complete the whole request right now
 	 */
-	if (blk_noretry_request(rq) && end_io_error(uptodate))
+	if (blk_noretry_request(rq) && error)
 		nr_bytes = rq->hard_nr_sectors << 9;
 
-	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
+	if (!blk_fs_request(rq) && error && !rq->errors)
 		rq->errors = -EIO;
 
 	/*
@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ide_dma_on(drive);
 	}
 
-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-		add_disk_randomness(rq->rq_disk);
-		if (dequeue) {
-			if (!list_empty(&rq->queuelist))
-				blkdev_dequeue_request(rq);
+	if (!__blk_end_request(rq, error, nr_bytes)) {
+		if (dequeue)
 			HWGROUP(drive)->rq = NULL;
-		}
-		end_that_request_last(rq, uptodate);
 		ret = 0;
 	}
 
@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 		drive->blocked = 0;
 		blk_start_queue(drive->queue);
 	}
-	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
-	end_that_request_last(rq, 1);
+	if (__blk_end_request(rq, 0, 0))
+		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 	}
 
 	spin_lock_irqsave(&ide_lock, flags);
-	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
 	rq->errors = err;
-	end_that_request_last(rq, !rq->errors);
+	if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
+		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 