[BLOCK] update IDE to use new blk_ordered for barriers
Update IDE to use the new blk_ordered interface. This patch changes behavior as follows:

* Partial completion of a barrier request is treated as failure of the whole ordered sequence; there is no more partial completion for barrier requests.

* Any failure of a pre- or post-flush request results in failure of the whole ordered sequence. A successfully completed ordered sequence therefore guarantees that all requests prior to the barrier reached the physical medium and, then, that the whole barrier request reached the physical medium.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 9a3dccc425
commit 3e087b5754
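For orientation before the diff: under the new interface a driver declares an ordered mode and, optionally, a prepare-flush callback up front, instead of installing prepare/end flush hooks on the queue by hand. Below is a minimal sketch of that contract, mirroring the decision logic of update_ordered() from the diff; it uses only calls visible in the patch (blk_queue_ordered(), blk_queue_issue_flush_fn(), the QUEUE_ORDERED_* modes), while wire_up_barriers() and its parameters are illustrative names, not part of the patch or the kernel.

/* Sketch only: how a 2.6-era driver would pick an ordered mode. */
static void wire_up_barriers(request_queue_t *q, int wcache, int can_flush,
			     prepare_flush_fn *prep, issue_flush_fn *issue)
{
	unsigned ordered;
	prepare_flush_fn *prep_fn = NULL;
	issue_flush_fn *issue_fn = NULL;

	if (!wcache)
		/* no write cache: draining the queue orders writes by itself */
		ordered = QUEUE_ORDERED_DRAIN;
	else if (can_flush) {
		/* write cache plus a working flush: drain, then pre/post flush */
		ordered = QUEUE_ORDERED_DRAIN_FLUSH;
		prep_fn = prep;
		issue_fn = issue;
	} else
		/* write cache but no usable flush command: no barrier support */
		ordered = QUEUE_ORDERED_NONE;

	blk_queue_ordered(q, ordered, prep_fn);
	blk_queue_issue_flush_fn(q, issue_fn);
}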
drivers/ide/ide-disk.c

@@ -681,50 +681,9 @@ static ide_proc_entry_t idedisk_proc[] = {
 #endif	/* CONFIG_PROC_FS */
 
-static void idedisk_end_flush(request_queue_t *q, struct request *flush_rq)
+static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
-	struct request *rq = flush_rq->end_io_data;
-	int good_sectors = rq->hard_nr_sectors;
-	int bad_sectors;
-	sector_t sector;
-
-	if (flush_rq->errors & ABRT_ERR) {
-		printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
-		blk_queue_ordered(drive->queue, QUEUE_ORDERED_NONE);
-		blk_queue_issue_flush_fn(drive->queue, NULL);
-		good_sectors = 0;
-	} else if (flush_rq->errors) {
-		good_sectors = 0;
-		if (blk_barrier_preflush(rq)) {
-			sector = ide_get_error_location(drive,flush_rq->buffer);
-			if ((sector >= rq->hard_sector) &&
-			    (sector < rq->hard_sector + rq->hard_nr_sectors))
-				good_sectors = sector - rq->hard_sector;
-		}
-	}
-
-	if (flush_rq->errors)
-		printk(KERN_ERR "%s: failed barrier write: "
-				"sector=%Lx(good=%d/bad=%d)\n",
-				drive->name, (unsigned long long)rq->sector,
-				good_sectors,
-				(int) (rq->hard_nr_sectors-good_sectors));
-
-	bad_sectors = rq->hard_nr_sectors - good_sectors;
-
-	if (good_sectors)
-		__ide_end_request(drive, rq, 1, good_sectors);
-	if (bad_sectors)
-		__ide_end_request(drive, rq, 0, bad_sectors);
-}
-
-static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
-{
-	ide_drive_t *drive = q->queuedata;
-
-	if (!drive->wcache)
-		return 0;
 
 	memset(rq->cmd, 0, sizeof(rq->cmd));
 
@@ -735,9 +694,8 @@ static int idedisk_prepare_flush(request_queue_t *q, struct request *rq)
 		rq->cmd[0] = WIN_FLUSH_CACHE;
 
-	rq->flags |= REQ_DRIVE_TASK | REQ_SOFTBARRIER;
+	rq->flags |= REQ_DRIVE_TASK;
 	rq->buffer = rq->cmd;
-	return 1;
 }
 
 static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
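Worth noting about the two hunks above: idedisk_prepare_flush() is now void and unconditional. Under the old scheme it had to check drive->wcache and return 0/1 to tell the block layer whether a flush command was prepared, and it tagged the request REQ_SOFTBARRIER itself; under blk_ordered the callback is only installed when flushing is actually wanted (see update_ordered() in the next hunk), so the callback can simply build the flush command every time.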
@@ -794,27 +752,64 @@ static int set_nowerr(ide_drive_t *drive, int arg)
 	return 0;
 }
 
+static void update_ordered(ide_drive_t *drive)
+{
+	struct hd_driveid *id = drive->id;
+	unsigned ordered = QUEUE_ORDERED_NONE;
+	prepare_flush_fn *prep_fn = NULL;
+	issue_flush_fn *issue_fn = NULL;
+
+	if (drive->wcache) {
+		unsigned long long capacity;
+		int barrier;
+		/*
+		 * We must avoid issuing commands a drive does not
+		 * understand or we may crash it. We check flush cache
+		 * is supported. We also check we have the LBA48 flush
+		 * cache if the drive capacity is too large. By this
+		 * time we have trimmed the drive capacity if LBA48 is
+		 * not available so we don't need to recheck that.
+		 */
+		capacity = idedisk_capacity(drive);
+		barrier = ide_id_has_flush_cache(id) &&
+			(drive->addressing == 0 || capacity <= (1ULL << 28) ||
+			 ide_id_has_flush_cache_ext(id));
+
+		printk(KERN_INFO "%s: cache flushes %ssupported\n",
+		       drive->name, barrier ? "" : "not ");
+
+		if (barrier) {
+			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+			prep_fn = idedisk_prepare_flush;
+			issue_fn = idedisk_issue_flush;
+		}
+	} else
+		ordered = QUEUE_ORDERED_DRAIN;
+
+	blk_queue_ordered(drive->queue, ordered, prep_fn);
+	blk_queue_issue_flush_fn(drive->queue, issue_fn);
+}
+
 static int write_cache(ide_drive_t *drive, int arg)
 {
 	ide_task_t args;
-	int err;
+	int err = 1;
 
-	if (!ide_id_has_flush_cache(drive->id))
-		return 1;
-
-	memset(&args, 0, sizeof(ide_task_t));
-	args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
+	if (ide_id_has_flush_cache(drive->id)) {
+		memset(&args, 0, sizeof(ide_task_t));
+		args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ?
 			SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
-	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
-	args.command_type = IDE_DRIVE_TASK_NO_DATA;
-	args.handler = &task_no_data_intr;
+		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
+		args.command_type = IDE_DRIVE_TASK_NO_DATA;
+		args.handler = &task_no_data_intr;
+		err = ide_raw_taskfile(drive, &args, NULL);
+		if (err == 0)
+			drive->wcache = arg;
+	}
 
-	err = ide_raw_taskfile(drive, &args, NULL);
-	if (err)
-		return err;
+	update_ordered(drive);
 
-	drive->wcache = arg;
-	return 0;
+	return err;
 }
 
 static int do_idedisk_flushcache (ide_drive_t *drive)
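The restructured write_cache() above funnels every write-cache toggle through update_ordered(), so the queue's ordered mode always tracks the drive's current wcache state instead of being chosen once at probe time. The flush-capability test (including the LBA48 flush check for drives larger than 2^28 sectors) moves here essentially verbatim from idedisk_setup(), where it is removed in the hunks below.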
@@ -888,7 +883,6 @@ static void idedisk_setup (ide_drive_t *drive)
 {
 	struct hd_driveid *id = drive->id;
 	unsigned long long capacity;
-	int barrier;
 
 	idedisk_add_settings(drive);
 
@@ -992,31 +986,6 @@ static void idedisk_setup (ide_drive_t *drive)
 		drive->wcache = 1;
 
 	write_cache(drive, 1);
-
-	/*
-	 * We must avoid issuing commands a drive does not understand
-	 * or we may crash it. We check flush cache is supported. We also
-	 * check we have the LBA48 flush cache if the drive capacity is
-	 * too large. By this time we have trimmed the drive capacity if
-	 * LBA48 is not available so we don't need to recheck that.
-	 */
-	barrier = 0;
-	if (ide_id_has_flush_cache(id))
-		barrier = 1;
-	if (drive->addressing == 1) {
-		/* Can't issue the correct flush ? */
-		if (capacity > (1ULL << 28) && !ide_id_has_flush_cache_ext(id))
-			barrier = 0;
-	}
-
-	printk(KERN_INFO "%s: cache flushes %ssupported\n",
-		drive->name, barrier ? "" : "not ");
-	if (barrier) {
-		blk_queue_ordered(drive->queue, QUEUE_ORDERED_FLUSH);
-		drive->queue->prepare_flush_fn = idedisk_prepare_flush;
-		drive->queue->end_flush_fn = idedisk_end_flush;
-		blk_queue_issue_flush_fn(drive->queue, idedisk_issue_flush);
-	}
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
drivers/ide/ide-io.c

@@ -119,10 +119,7 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	if (!nr_sectors)
 		nr_sectors = rq->hard_cur_sectors;
 
-	if (blk_complete_barrier_rq_locked(drive->queue, rq, nr_sectors))
-		ret = rq->nr_sectors != 0;
-	else
-		ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
+	ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
 
 	spin_unlock_irqrestore(&ide_lock, flags);
 	return ret;
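This ide-io.c hunk follows from the first behavior change in the commit message: since a partially completed barrier now fails the whole ordered sequence rather than being partially completed, ide_end_request() no longer needs the blk_complete_barrier_rq_locked() special case and can complete barrier requests like any other request.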