block,fs: use REQ_* flags directly

Remove the WRITE_* and READ_SYNC wrappers, and just use the flags
directly.  Where applicable this also drops usage of the
bio_set_op_attrs wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author:    Christoph Hellwig <hch@lst.de>
Date:      2016-11-01 07:40:10 -06:00
Committer: Jens Axboe <axboe@fb.com>
Commit:    70fd76140a (parent: a2b809672e)
53 changed files with 133 additions and 182 deletions
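For reference, the wrappers being removed expand to the following raw flags
(the expansions are exactly the #defines deleted from include/linux/fs.h at
the end of this diff), so every hunk below is a mechanical substitution.
A minimal before/after sketch follows; the bio in it is illustrative, not a
line from this commit:

	READ_SYNC        -> 0
	WRITE_SYNC       -> REQ_SYNC
	WRITE_ODIRECT    -> REQ_SYNC | REQ_IDLE
	WRITE_FLUSH      -> REQ_PREFLUSH
	WRITE_FUA        -> REQ_FUA
	WRITE_FLUSH_FUA  -> REQ_PREFLUSH | REQ_FUA

	/* before: the op flags hidden behind a wrapper macro */
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);

	/* after: the same op and flags spelled out directly */
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;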

@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
+	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
@@ -486,7 +486,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_bdev = bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	ret = submit_bio_wait(bio);

@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
 	bio->bi_bdev = device->ldev->backing_bdev;
 	bio->bi_private = octx;
 	bio->bi_end_io = one_flush_endio;
-	bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
 	device->flush_jif = jiffies;
 	set_bit(FLUSH_PENDING, &device->flags);

@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	case BLKIF_OP_WRITE:
 		ring->st_wr_req++;
 		operation = REQ_OP_WRITE;
-		operation_flags = WRITE_ODIRECT;
+		operation_flags = REQ_SYNC | REQ_IDLE;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
 		drain = true;
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		ring->st_f_req++;
 		operation = REQ_OP_WRITE;
-		operation_flags = WRITE_FLUSH;
+		operation_flags = REQ_PREFLUSH;
 		break;
 	default:
 		operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	nseg = req->operation == BLKIF_OP_INDIRECT ?
 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
-	if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	}
 	/* Wait on all outstanding I/O's and once that has been completed
-	 * issue the WRITE_FLUSH.
+	 * issue the flush.
 	 */
 	if (drain)
 		xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation_flags != WRITE_FLUSH);
+		BUG_ON(operation_flags != REQ_PREFLUSH);
 		bio = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))

@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 	bch_bio_map(bio, b->keys.set[0].data);
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-	bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+	b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
 	bch_bio_map(b->bio, i);
 	/*

@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
 	bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
 	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 	bch_bio_map(bio, sorted);
 	submit_bio_wait(bio);
@@ -113,7 +113,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 	check = bio_clone(bio, GFP_NOIO);
 	if (!check)
 		return;
-	bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+	check->bi_opf = REQ_OP_READ;
 	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;

@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		flush->bi_bdev = bio->bi_bdev;
 		flush->bi_end_io = request_endio;
 		flush->bi_private = cl;
-		bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+		flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 		closure_bio_submit(flush, cl);
 	}

@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 		return "bad uuid pointer";
 	bkey_copy(&c->uuid_bucket, k);
-	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+	uuid_io(c, REQ_OP_READ, 0, k, cl);
 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 		struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			ca->prio_last_buckets[bucket_nr] = bucket;
 			bucket_nr++;
-			prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+			prio_io(ca, bucket, REQ_OP_READ, 0);
 		if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 			pr_warn("bad csum reading priorities");

@@ -1316,7 +1316,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,

@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
 	};
 	lc->io_req.bi_op = REQ_OP_WRITE;
-	lc->io_req.bi_op_flags = WRITE_FLUSH;
+	lc->io_req.bi_op_flags = REQ_PREFLUSH;
 	return dm_io(&lc->io_req, 1, &null_location, NULL);
 }

@@ -261,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,

@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
 	/*
 	 * Commit exceptions to disk.
 	 */
-	if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
 		ps->valid = 0;
 	/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	for (i = 0; i < nr_merged; i++)
 		clear_exception(ps, ps->current_committed - 1 - i);
-	r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
 	if (r < 0)
 		return r;

@@ -1527,7 +1527,7 @@ static struct mapped_device *alloc_dev(int minor)
 	bio_init(&md->flush_bio);
 	md->flush_bio.bi_bdev = md->bdev;
-	bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	dm_stats_init(&md->stats);

@@ -394,7 +394,7 @@ static void submit_flushes(struct work_struct *ws)
 			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
-			bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 			atomic_inc(&mddev->flush_pending);
 			submit_bio(bi);
 			rcu_read_lock();
@@ -743,7 +743,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 	atomic_inc(&mddev->pending_writes);
 	submit_bio(bio);

@@ -685,7 +685,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 	bio_reset(&log->flush_bio);
 	log->flush_bio.bi_bdev = log->rdev->bdev;
 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
-	bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	submit_bio(&log->flush_bio);
 }
@@ -1053,7 +1053,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	mb->checksum = cpu_to_le32(crc);
 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-			  WRITE_FUA, false)) {
+			  REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
 	}

@@ -913,7 +913,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 			op = REQ_OP_WRITE;
 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			if (test_bit(R5_Discard, &sh->dev[i].flags))
 				op = REQ_OP_DISCARD;
 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))

@@ -58,7 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 	if (req->cmd->rw.opcode == nvme_cmd_write) {
 		op = REQ_OP_WRITE;
-		op_flags = WRITE_ODIRECT;
+		op_flags = REQ_SYNC | REQ_IDLE;
 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
 			op_flags |= REQ_FUA;
 	} else {
@@ -109,7 +109,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 	bio->bi_bdev = req->ns->bdev;
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	submit_bio(bio);
 }

@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	if (!immed)
 		bio->bi_private = cmd;
 	submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
-		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * Force writethrough using REQ_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
 		op = REQ_OP_WRITE;
 		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 		}
 	} else {
 		op = REQ_OP_READ;

@@ -3485,9 +3485,9 @@ static int write_dev_supers(struct btrfs_device *device,
 		 * to go down lazy.
 		 */
 		if (i == 0)
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
 		else
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 		if (ret)
 			errors++;
 	}
@@ -3551,7 +3551,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	init_completion(&device->flush_wait);
 	bio->bi_private = &device->flush_wait;
 	device->flush_bio = bio;

@@ -127,7 +127,7 @@ struct extent_page_data {
 	 */
 	unsigned int extent_locked:1;
-	/* tells the submit_bio code to use a WRITE_SYNC */
+	/* tells the submit_bio code to use REQ_SYNC */
 	unsigned int sync_io:1;
 };
@@ -2047,7 +2047,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 		return -EIO;
 	}
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	bio_add_page(bio, page, length, pg_offset);
 	if (btrfsic_submit_bio_wait(bio)) {
@@ -2388,7 +2388,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct bio *bio;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2404,9 +2404,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	}
 	if (failed_bio->bi_vcnt > 1)
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 	phy_offset >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
@@ -3484,7 +3482,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned long nr_written = 0;
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = WRITE_SYNC;
+		write_flags = REQ_SYNC;
 	trace___extent_writepage(page, inode, wbc);
@@ -3729,7 +3727,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
 	unsigned long start, end;
-	int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
+	int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
 	int ret = 0;
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4076,7 +4074,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 		int ret;
 		bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-				 epd->sync_io ? WRITE_SYNC : 0);
+				 epd->sync_io ? REQ_SYNC : 0);
 		ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
 		BUG_ON(ret < 0); /* -ENOMEM */

@@ -7917,7 +7917,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	struct io_failure_record *failrec;
 	struct bio *bio;
 	int isector;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -7936,9 +7936,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	if ((failed_bio->bi_vcnt > 1)
 		|| (failed_bio->bi_io_vec->bv_len
 			> BTRFS_I(inode)->root->sectorsize))
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 	isector = start - btrfs_io_bio(failed_bio)->logical;
 	isector >>= inode->i_sb->s_blocksize_bits;

@@ -4440,7 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
 	if (ret != PAGE_SIZE) {
 leave_with_eio:

@@ -6023,7 +6023,7 @@ static void btrfs_end_bio(struct bio *bio)
 			else
 				btrfs_dev_stat_inc(dev,
 					BTRFS_DEV_STAT_READ_ERRS);
-			if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
+			if (bio->bi_opf & REQ_PREFLUSH)
 				btrfs_dev_stat_inc(dev,
 					BTRFS_DEV_STAT_FLUSH_ERRS);
 			btrfs_dev_stat_print_on_error(dev);

@@ -62,7 +62,7 @@ struct btrfs_device {
 	int running_pending;
 	/* regular prio bios */
 	struct btrfs_pending_bios pending_bios;
-	/* WRITE_SYNC bios */
+	/* sync bios */
 	struct btrfs_pending_bios pending_sync_bios;
 	struct block_device *bdev;

@@ -753,7 +753,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC);
+				write_dirty_buffer(bh, REQ_SYNC);
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -1684,7 +1684,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
 int __block_write_full_page(struct inode *inode, struct page *page,
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	unsigned int blocksize, bbits;
 	int nr_underway = 0;
-	int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+	int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 	head = create_page_buffers(page, inode,
 				   (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -3210,7 +3210,7 @@ EXPORT_SYMBOL(__sync_dirty_buffer);
 int sync_dirty_buffer(struct buffer_head *bh)
 {
-	return __sync_dirty_buffer(bh, WRITE_SYNC);
+	return __sync_dirty_buffer(bh, REQ_SYNC);
 }
 EXPORT_SYMBOL(sync_dirty_buffer);

@@ -1209,7 +1209,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio->inode = inode;
 	if (iov_iter_rw(iter) == WRITE) {
 		dio->op = REQ_OP_WRITE;
-		dio->op_flags = WRITE_ODIRECT;
+		dio->op_flags = REQ_SYNC | REQ_IDLE;
 	} else {
 		dio->op = REQ_OP_READ;
 	}

@@ -35,7 +35,7 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
 }
 /*
- * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * Write the MMP block using REQ_SYNC to try to get the block on-disk
  * faster.
  */
 static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+	submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
 	wait_on_buffer(bh);
 	sb_end_write(sb);
 	if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
 	get_bh(*bh);
 	lock_buffer(*bh);
 	(*bh)->b_end_io = end_buffer_read_sync;
-	submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
+	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
 	wait_on_buffer(*bh);
 	if (!buffer_uptodate(*bh)) {
 		ret = -EIO;

@@ -340,7 +340,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
 	if (bio) {
 		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
-				  WRITE_SYNC : 0;
+				  REQ_SYNC : 0;
 		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
 		submit_bio(io->io_bio);
 	}

@@ -4553,7 +4553,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	unlock_buffer(sbh);
 	if (sync) {
 		error = __sync_dirty_buffer(sbh,
-			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
+			test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
 		if (error)
 			return error;

@@ -65,7 +65,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = READ_SYNC | REQ_META | REQ_PRIO,
+		.op_flags = REQ_META | REQ_PRIO,
 		.old_blkaddr = index,
 		.new_blkaddr = index,
 		.encrypted_page = NULL,
@@ -160,7 +160,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
+		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 	};
 	struct blk_plug plug;

@@ -198,11 +198,9 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 	if (type >= META_FLUSH) {
 		io->fio.type = META_FLUSH;
 		io->fio.op = REQ_OP_WRITE;
-		if (test_opt(sbi, NOBARRIER))
-			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
-		else
-			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
-						REQ_PRIO;
+		io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
+		if (!test_opt(sbi, NOBARRIER))
+			io->fio.op_flags |= REQ_FUA;
 	}
 	__submit_merged_bio(io);
 out:
@@ -483,7 +481,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
 		return page;
 	f2fs_put_page(page, 0);
-	page = get_read_data_page(inode, index, READ_SYNC, false);
+	page = get_read_data_page(inode, index, 0, false);
 	if (IS_ERR(page))
 		return page;
@@ -509,7 +507,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 repeat:
-	page = get_read_data_page(inode, index, READ_SYNC, for_write);
+	page = get_read_data_page(inode, index, 0, for_write);
 	if (IS_ERR(page))
 		return page;
@@ -1251,7 +1249,7 @@ static int f2fs_write_data_page(struct page *page,
 		.sbi = sbi,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
 		.page = page,
 		.encrypted_page = NULL,
 	};
@@ -1663,7 +1661,7 @@ repeat:
 		err = PTR_ERR(bio);
 		goto fail;
 	}
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+	bio->bi_opf = REQ_OP_READ;
 	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 		bio_put(bio);
 		err = -EFAULT;

@@ -550,7 +550,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 		.sbi = F2FS_I_SB(inode),
 		.type = DATA,
 		.op = REQ_OP_READ,
-		.op_flags = READ_SYNC,
+		.op_flags = 0,
 		.encrypted_page = NULL,
 	};
 	struct dnode_of_data dn;
@@ -625,7 +625,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 	fio.op = REQ_OP_WRITE;
-	fio.op_flags = WRITE_SYNC;
+	fio.op_flags = REQ_SYNC;
 	fio.new_blkaddr = newaddr;
 	f2fs_submit_page_mbio(&fio);
@@ -663,7 +663,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 		.sbi = F2FS_I_SB(inode),
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC,
+		.op_flags = REQ_SYNC,
 		.page = page,
 		.encrypted_page = NULL,
 	};

@@ -111,7 +111,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 		.sbi = F2FS_I_SB(dn->inode),
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_PRIO,
 		.page = page,
 		.encrypted_page = NULL,
 	};

@@ -1134,7 +1134,7 @@ repeat:
 	if (!page)
 		return ERR_PTR(-ENOMEM);
-	err = read_node_page(page, READ_SYNC);
+	err = read_node_page(page, 0);
 	if (err < 0) {
 		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
@@ -1570,7 +1570,7 @@ static int f2fs_write_node_page(struct page *page,
 		.sbi = sbi,
 		.type = NODE,
 		.op = REQ_OP_WRITE,
-		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
 		.page = page,
 		.encrypted_page = NULL,
 	};

@@ -259,7 +259,7 @@ static int __commit_inmem_pages(struct inode *inode,
 		.sbi = sbi,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
 	bool submit_bio = false;
@@ -420,7 +420,7 @@ repeat:
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 		bio->bi_bdev = sbi->sb->s_bdev;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+		bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 		ret = submit_bio_wait(bio);
 		llist_for_each_entry_safe(cmd, next,
@@ -454,7 +454,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 		atomic_inc(&fcc->submit_flush);
 		bio->bi_bdev = sbi->sb->s_bdev;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+		bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 		ret = submit_bio_wait(bio);
 		atomic_dec(&fcc->submit_flush);
 		bio_put(bio);
@@ -1515,7 +1515,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
 		.old_blkaddr = page->index,
 		.new_blkaddr = page->index,
 		.page = page,

@@ -1238,7 +1238,7 @@ static int __f2fs_commit_super(struct buffer_head *bh,
 	unlock_buffer(bh);
 	/* it's rare case, we can do fua all the time */
-	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+	return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
 }
 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,

@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	struct gfs2_log_header *lh;
 	unsigned int tail;
 	u32 hash;
-	int op_flags = WRITE_FLUSH_FUA | REQ_META;
+	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 	lh = page_address(page);
@@ -682,7 +682,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
-		op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
+		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 	}
 	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);

@@ -38,7 +38,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
 	int write_flags = REQ_META | REQ_PRIO |
-			  (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+			  (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
@@ -285,7 +285,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		}
 	}
-	gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
 	if (!(flags & DIO_WAIT))
 		return 0;
@@ -453,7 +453,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
+		ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh);
 	dblock++;
 	extlen--;

@@ -246,7 +246,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 	bio->bi_end_io = end_bio_io_page;
 	bio->bi_private = page;
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
 	submit_bio(bio);
 	wait_on_page_locked(page);
 	bio_put(bio);

@@ -221,7 +221,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	error2 = hfsplus_submit_bio(sb,
 				   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
 				   sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
-				   WRITE_SYNC);
+				   REQ_SYNC);
 	if (!error)
 		error = error2;
 	if (!write_backup)
@@ -230,7 +230,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	error2 = hfsplus_submit_bio(sb,
 				   sbi->part_start + sbi->sect_count - 2,
 				   sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
-				   WRITE_SYNC);
+				   REQ_SYNC);
 	if (!error)
 		error2 = error;
 out:

@@ -186,7 +186,7 @@ __flush_batch(journal_t *journal, int *batch_count)
 	blk_start_plug(&plug);
 	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
 	blk_finish_plug(&plug);
 	for (i = 0; i < *batch_count; i++) {

@@ -155,9 +155,10 @@ static int journal_submit_commit_record(journal_t *journal,
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !jbd2_has_feature_async_commit(journal))
-		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+		ret = submit_bh(REQ_OP_WRITE,
+				REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
 	else
-		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 	*cbh = bh;
 	return ret;
@@ -402,7 +403,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		jbd2_journal_update_sb_log_tail(journal,
 						journal->j_tail_sequence,
 						journal->j_tail,
-						WRITE_SYNC);
+						REQ_SYNC);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	} else {
 		jbd_debug(3, "superblock not updated\n");
@@ -717,7 +718,7 @@ start_journal_io:
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			bh->b_end_io = journal_end_buffer_io_sync;
-			submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 		}
 		cond_resched();
 		stats.run.rs_blocks_logged += bufs;

@@ -913,7 +913,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 	 * space and if we lose sb update during power failure we'd replay
 	 * old transaction with possibly newly overwritten data.
 	 */
-	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
 	if (ret)
 		goto out;
@@ -1306,7 +1306,7 @@ static int journal_reset(journal_t *journal)
 		/* Lock here to make assertions happy... */
 		mutex_lock(&journal->j_checkpoint_mutex);
 		/*
-		 * Update log tail information. We use WRITE_FUA since new
+		 * Update log tail information. We use REQ_FUA since new
 		 * transaction will start reusing journal space and so we
 		 * must make sure information about current log tail is on
 		 * disk before that.
@@ -1314,7 +1314,7 @@ static int journal_reset(journal_t *journal)
 		jbd2_journal_update_sb_log_tail(journal,
 						journal->j_tail_sequence,
 						journal->j_tail,
-						WRITE_FUA);
+						REQ_FUA);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	}
 	return jbd2_journal_start_thread(journal);
@@ -1454,7 +1454,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
 	sb->s_errno = cpu_to_be32(journal->j_errno);
 	read_unlock(&journal->j_state_lock);
-	jbd2_write_superblock(journal, WRITE_FUA);
+	jbd2_write_superblock(journal, REQ_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
@@ -1720,7 +1720,8 @@ int jbd2_journal_destroy(journal_t *journal)
 			++journal->j_transaction_sequence;
 			write_unlock(&journal->j_state_lock);
-			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+			jbd2_mark_journal_empty(journal,
+						REQ_PREFLUSH | REQ_FUA);
 			mutex_unlock(&journal->j_checkpoint_mutex);
 		} else
 			err = -EIO;
@@ -1979,7 +1980,7 @@ int jbd2_journal_flush(journal_t *journal)
 	 * the magic code for a fully-recovered superblock. Any future
 	 * commits of data to the journal will restore the current
 	 * s_start value. */
-	jbd2_mark_journal_empty(journal, WRITE_FUA);
+	jbd2_mark_journal_empty(journal, REQ_FUA);
 	mutex_unlock(&journal->j_checkpoint_mutex);
 	write_lock(&journal->j_state_lock);
 	J_ASSERT(!journal->j_running_transaction);
@@ -2025,7 +2026,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
 	if (write) {
 		/* Lock to make assertions happy... */
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_mark_journal_empty(journal, WRITE_FUA);
+		jbd2_mark_journal_empty(journal, REQ_FUA);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	}

@@ -648,7 +648,7 @@ static void flush_descriptor(journal_t *journal,
 	set_buffer_jwrite(descriptor);
 	BUFFER_TRACE(descriptor, "write");
 	set_buffer_dirty(descriptor);
-	write_dirty_buffer(descriptor, WRITE_SYNC);
+	write_dirty_buffer(descriptor, REQ_SYNC);
 }
 #endif

@@ -2002,7 +2002,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+	bio->bi_opf = REQ_OP_READ;
 	/*check if journaling to disk has been disabled*/
 	if (log->no_integrity) {
 		bio->bi_iter.bi_size = 0;
@@ -2146,7 +2146,7 @@ static void lbmStartIO(struct lbuf * bp)
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	/* check if journaling to disk has been disabled */
 	if (log->no_integrity) {

@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 	struct buffer_head map_bh;
 	loff_t i_size = i_size_read(inode);
 	int ret = 0;
-	int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+	int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 	if (page_has_buffers(page)) {
 		struct buffer_head *head = page_buffers(page);
@@ -705,7 +705,7 @@ mpage_writepages(struct address_space *mapping,
 		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
 		if (mpd.bio) {
 			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-					WRITE_SYNC : 0);
+					REQ_SYNC : 0);
 			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
 		}
 	}
@@ -726,7 +726,7 @@ int mpage_writepage(struct page *page, get_block_t get_block,
 	int ret = __mpage_writepage(page, wbc, &mpd);
 	if (mpd.bio) {
 		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-				WRITE_SYNC : 0);
+				REQ_SYNC : 0);
 		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
 	}
 	return ret;

@@ -189,7 +189,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
 	set_buffer_dirty(nilfs->ns_sbh[0]);
 	if (nilfs_test_opt(nilfs, BARRIER)) {
 		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
-					  WRITE_SYNC | WRITE_FLUSH_FUA);
+					  REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
 	} else {
 		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
 	}

@@ -627,7 +627,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
 	slot = o2nm_this_node();
 	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
-				 WRITE_SYNC);
+				 REQ_SYNC);
 	if (IS_ERR(bio)) {
 		status = PTR_ERR(bio);
 		mlog_errno(status);

@@ -1111,7 +1111,8 @@ static int flush_commit_list(struct super_block *s,
 	mark_buffer_dirty(jl->j_commit_bh) ;
 	depth = reiserfs_write_unlock_nested(s);
 	if (reiserfs_barrier_flush(s))
-		__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
+		__sync_dirty_buffer(jl->j_commit_bh,
+				    REQ_PREFLUSH | REQ_FUA);
 	else
 		sync_dirty_buffer(jl->j_commit_bh);
 	reiserfs_write_lock_nested(s, depth);
@@ -1269,7 +1270,8 @@ static int _update_journal_header_block(struct super_block *sb,
 	depth = reiserfs_write_unlock_nested(sb);
 	if (reiserfs_barrier_flush(sb))
-		__sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
+		__sync_dirty_buffer(journal->j_header_bh,
+				    REQ_PREFLUSH | REQ_FUA);
 	else
 		sync_dirty_buffer(journal->j_header_bh);

@@ -495,8 +495,10 @@ xfs_submit_ioend(
 	ioend->io_bio->bi_private = ioend;
 	ioend->io_bio->bi_end_io = xfs_end_bio;
-	bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-			 (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+	ioend->io_bio->bi_opf = REQ_OP_WRITE;
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		ioend->io_bio->bi_opf |= REQ_SYNC;
 	/*
 	 * If we are failing the IO now, just mark the ioend with an
 	 * error and finish it. This will run IO completion immediately
@@ -567,8 +569,9 @@ xfs_chain_bio(
 	bio_chain(ioend->io_bio, new);
 	bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
-	bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-			 (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+	ioend->io_bio->bi_opf = REQ_OP_WRITE;
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		ioend->io_bio->bi_opf |= REQ_SYNC;
 	submit_bio(ioend->io_bio);
 	ioend->io_bio = new;
 }

@@ -1304,7 +1304,7 @@ _xfs_buf_ioapply(
 	if (bp->b_flags & XBF_WRITE) {
 		op = REQ_OP_WRITE;
 		if (bp->b_flags & XBF_SYNCIO)
-			op_flags = WRITE_SYNC;
+			op_flags = REQ_SYNC;
 		if (bp->b_flags & XBF_FUA)
 			op_flags |= REQ_FUA;
 		if (bp->b_flags & XBF_FLUSH)

@@ -151,58 +151,11 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  */
 #define CHECK_IOVEC_ONLY -1
-/*
- * The below are the various read and write flags that we support. Some of
- * them include behavioral modifiers that send information down to the
- * block layer and IO scheduler. They should be used along with a req_op.
- * Terminology:
- *
- *	The block layer uses device plugging to defer IO a little bit, in
- *	the hope that we will see more IO very shortly. This increases
- *	coalescing of adjacent IO and thus reduces the number of IOs we
- *	have to send to the device. It also allows for better queuing,
- *	if the IO isn't mergeable. If the caller is going to be waiting
- *	for the IO, then he must ensure that the device is unplugged so
- *	that the IO is dispatched to the driver.
- *
- *	All IO is handled async in Linux. This is fine for background
- *	writes, but for reads or writes that someone waits for completion
- *	on, we want to notify the block layer and IO scheduler so that they
- *	know about it. That allows them to make better scheduling
- *	decisions. So when the below references 'sync' and 'async', it
- *	is referencing this priority hint.
- *
- * With that in mind, the available types are:
- *
- * READ			A normal read operation. Device will be plugged.
- * READ_SYNC		A synchronous read. Device is not plugged, caller can
- *			immediately wait on this read without caring about
- *			unplugging.
- * WRITE		A normal async write. Device will be plugged.
- * WRITE_SYNC		Synchronous write. Identical to WRITE, but passes down
- *			the hint that someone will be waiting on this IO
- *			shortly. The write equivalent of READ_SYNC.
- * WRITE_ODIRECT	Special case write for O_DIRECT only.
- * WRITE_FLUSH		Like WRITE_SYNC but with preceding cache flush.
- * WRITE_FUA		Like WRITE_SYNC but data is guaranteed to be on
- *			non-volatile media on completion.
- * WRITE_FLUSH_FUA	Combination of WRITE_FLUSH and FUA. The IO is preceded
- *			by a cache flush and data is guaranteed to be on
- *			non-volatile media on completion.
- *
- */
 #define RW_MASK			REQ_OP_WRITE
 #define READ			REQ_OP_READ
 #define WRITE			REQ_OP_WRITE
-#define READ_SYNC		0
-#define WRITE_SYNC		REQ_SYNC
-#define WRITE_ODIRECT		(REQ_SYNC | REQ_IDLE)
-#define WRITE_FLUSH		REQ_PREFLUSH
-#define WRITE_FUA		REQ_FUA
-#define WRITE_FLUSH_FUA		(REQ_PREFLUSH | REQ_FUA)
 /*
  * Attribute flags. These should be or-ed together to figure out what
  * has been changed!

@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 		{ IPU, "IN-PLACE" }, \
 		{ OPU, "OUT-OF-PLACE" })
-#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA))
 #define F2FS_BIO_EXTRA_MASK(t)	(t & (REQ_META | REQ_PRIO))
 #define show_bio_type(op_flags)	show_bio_op_flags(op_flags), \
@@ -65,11 +65,9 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 	__print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
 		{ 0, "WRITE" }, \
 		{ REQ_RAHEAD, "READAHEAD" }, \
-		{ READ_SYNC, "READ_SYNC" }, \
-		{ WRITE_SYNC, "WRITE_SYNC" }, \
-		{ WRITE_FLUSH, "WRITE_FLUSH" }, \
-		{ WRITE_FUA, "WRITE_FUA" }, \
-		{ WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" })
+		{ REQ_SYNC, "REQ_SYNC" }, \
+		{ REQ_PREFLUSH, "REQ_PREFLUSH" }, \
+		{ REQ_FUA, "REQ_FUA" })
 #define show_bio_extra(type) \
 	__print_symbolic(F2FS_BIO_EXTRA_MASK(type), \

@@ -307,7 +307,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 {
 	int error;
-	hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 		      swsusp_header, NULL);
 	if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
 	    !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
@@ -317,7 +317,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 		swsusp_header->flags = flags;
 		if (flags & SF_CRC32_MODE)
 			swsusp_header->crc32 = handle->crc32;
-		error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 				      swsusp_resume_block, swsusp_header, NULL);
 	} else {
 		printk(KERN_ERR "PM: Swap header not found!\n");
@@ -397,7 +397,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 	} else {
 		src = buf;
 	}
-	return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb);
+	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 }
 static void release_swap_writer(struct swap_map_handle *handle)
@@ -1000,8 +1000,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
 			return -ENOMEM;
 		}
-		error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset,
-				tmp->map, NULL);
+		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
 		if (error) {
 			release_swap_reader(handle);
 			return error;
@@ -1025,7 +1024,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
 	offset = handle->cur->entries[handle->k];
 	if (!offset)
 		return -EFAULT;
-	error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb);
+	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
 	if (error)
 		return error;
 	if (++handle->k >= MAP_PAGE_ENTRIES) {
@@ -1534,7 +1533,7 @@ int swsusp_check(void)
 	if (!IS_ERR(hib_resume_bdev)) {
 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
 		clear_page(swsusp_header);
-		error = hib_submit_io(REQ_OP_READ, READ_SYNC,
+		error = hib_submit_io(REQ_OP_READ, 0,
 					swsusp_resume_block,
 					swsusp_header, NULL);
 		if (error)
@@ -1543,7 +1542,7 @@ int swsusp_check(void)
 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
 			/* Reset swap signature now */
-			error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 						swsusp_resume_block,
 						swsusp_header, NULL);
 		} else {
@@ -1588,11 +1587,11 @@ int swsusp_unmark(void)
 {
 	int error;
-	hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 		      swsusp_header, NULL);
 	if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
 		memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
-		error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 					swsusp_resume_block,
 					swsusp_header, NULL);
 	} else {