mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-11-20 00:26:39 +08:00
block: Add flag for telling the IO schedulers NOT to anticipate more IO
By default, CFQ will anticipate more IO from a given io context if the previously completed IO was sync. This used to be fine, since the only sync IO was reads and O_DIRECT writes. But with more "normal" sync writes being used now, we don't want to anticipate for those. Add a bio/request flag that informs the IO scheduler that this is a sync request that we should not idle for. Introduce WRITE_ODIRECT specifically for O_DIRECT writes, and make sure that the other sync writes set this flag. Signed-off-by: Jens Axboe <jens.axboe@oracle.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
644b2d99b7
commit
aeb6fafb8f
@ -1128,6 +1128,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
|
||||
req->cmd_flags |= REQ_UNPLUG;
|
||||
if (bio_rw_meta(bio))
|
||||
req->cmd_flags |= REQ_RW_META;
|
||||
if (bio_noidle(bio))
|
||||
req->cmd_flags |= REQ_NOIDLE;
|
||||
|
||||
req->errors = 0;
|
||||
req->hard_sector = req->sector = bio->bi_sector;
|
||||
|
@ -1992,9 +1992,11 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
|
||||
}
|
||||
if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
|
||||
cfq_slice_expired(cfqd, 1);
|
||||
else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
|
||||
else if (sync && !rq_noidle(rq) &&
|
||||
RB_EMPTY_ROOT(&cfqq->sort_list)) {
|
||||
cfq_arm_slice_timer(cfqd);
|
||||
}
|
||||
}
|
||||
|
||||
if (!cfqd->rq_in_driver)
|
||||
cfq_schedule_dispatch(cfqd);
|
||||
|
@ -1126,7 +1126,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
|
||||
int acquire_i_mutex = 0;
|
||||
|
||||
if (rw & WRITE)
|
||||
rw = WRITE_SYNC;
|
||||
rw = WRITE_ODIRECT;
|
||||
|
||||
if (bdev)
|
||||
bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev));
|
||||
|
@ -145,20 +145,21 @@ struct bio {
|
||||
* bit 2 -- barrier
|
||||
* Insert a serialization point in the IO queue, forcing previously
|
||||
* submitted IO to be completed before this one is issued.
|
||||
* bit 3 -- synchronous I/O hint: the block layer will unplug immediately
|
||||
* Note that this does NOT indicate that the IO itself is sync, just
|
||||
* that the block layer will not postpone issue of this IO by plugging.
|
||||
* bit 4 -- metadata request
|
||||
* bit 3 -- synchronous I/O hint.
|
||||
* bit 4 -- Unplug the device immediately after submitting this bio.
|
||||
* bit 5 -- metadata request
|
||||
* Used for tracing to differentiate metadata and data IO. May also
|
||||
* get some preferential treatment in the IO scheduler
|
||||
* bit 5 -- discard sectors
|
||||
* bit 6 -- discard sectors
|
||||
* Informs the lower level device that this range of sectors is no longer
|
||||
* used by the file system and may thus be freed by the device. Used
|
||||
* for flash based storage.
|
||||
* bit 6 -- fail fast device errors
|
||||
* bit 7 -- fail fast transport errors
|
||||
* bit 8 -- fail fast driver errors
|
||||
* bit 7 -- fail fast device errors
|
||||
* bit 8 -- fail fast transport errors
|
||||
* bit 9 -- fail fast driver errors
|
||||
* Don't want driver retries for any fast fail whatever the reason.
|
||||
* bit 10 -- Tell the IO scheduler not to wait for more requests after this
|
||||
*      one has been submitted, even if it is a SYNC request.
|
||||
*/
|
||||
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
|
||||
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
|
||||
@ -170,6 +171,7 @@ struct bio {
|
||||
#define BIO_RW_FAILFAST_DEV 7
|
||||
#define BIO_RW_FAILFAST_TRANSPORT 8
|
||||
#define BIO_RW_FAILFAST_DRIVER 9
|
||||
#define BIO_RW_NOIDLE 10
|
||||
|
||||
#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
|
||||
|
||||
@ -188,6 +190,7 @@ struct bio {
|
||||
#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
|
||||
#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
|
||||
#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
|
||||
#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE)
|
||||
|
||||
/*
|
||||
* upper 16 bits of bi_rw define the io priority of this bio
|
||||
|
@ -118,6 +118,7 @@ enum rq_flag_bits {
|
||||
__REQ_COPY_USER, /* contains copies of user pages */
|
||||
__REQ_INTEGRITY, /* integrity metadata has been remapped */
|
||||
__REQ_UNPLUG, /* unplug queue on submission */
|
||||
__REQ_NOIDLE, /* Don't anticipate more IO after this one */
|
||||
__REQ_NR_BITS, /* stops here */
|
||||
};
|
||||
|
||||
@ -145,6 +146,7 @@ enum rq_flag_bits {
|
||||
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
|
||||
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
|
||||
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
|
||||
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
|
||||
|
||||
#define BLK_MAX_CDB 16
|
||||
|
||||
@ -633,6 +635,7 @@ static inline bool rq_is_sync(struct request *rq)
|
||||
}
|
||||
|
||||
#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
|
||||
#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
|
||||
|
||||
static inline int blk_queue_full(struct request_queue *q, int sync)
|
||||
{
|
||||
|
@ -95,11 +95,12 @@ struct inodes_stat_t {
|
||||
#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
|
||||
#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
|
||||
#define READ_META (READ | (1 << BIO_RW_META))
|
||||
#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
|
||||
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO))
|
||||
#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
|
||||
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
|
||||
#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
|
||||
#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
|
||||
#define SWRITE_SYNC_PLUG \
|
||||
(SWRITE | (1 << BIO_RW_SYNCIO))
|
||||
(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
|
||||
#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
|
||||
#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
|
||||
#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
|
||||
#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
|
||||
|
Loading…
Reference in New Issue
Block a user