Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-15 16:24:13 +08:00)

Commit 80a761fd33
Failfast has characteristics distinct from other attributes. When issuing, executing and successfully completing requests, failfast doesn't make any difference. It only affects how a request is handled on failure. Allowing requests with different failfast settings to be merged causes normal IOs to fail prematurely, while not allowing it has a performance penalty, as failfast is used for readaheads which are likely to be located near in-flight or to-be-issued normal IOs.

This patch introduces the concept of 'mixed merge'. A request is a mixed merge if it is a merge of segments which require different handling on failure. Currently the only mixable attributes are the failfast ones (or lack thereof).

When a bio with different failfast settings is added to an existing request, or requests with different failfast settings are merged, the merged request is marked mixed. Each bio carries its failfast settings and the request always tracks the failfast state of its first bio. When the request fails, blk_rq_err_bytes() can be used to determine how many bytes can be safely failed without crossing into an area which requires further retries.

This allows request merging regardless of failfast settings while keeping the failure handling correct. This patch only implements mixed merge but doesn't enable it. The next one will update SCSI to make use of mixed merge.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Niel Lambrechts <niel.lambrechts@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
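As a rough illustration of the mechanism described above (an editor's sketch, not code from the patch itself): at merge time a request built from bios with differing failfast bits would be flagged via blk_rq_set_mixed_merge(), and at completion time only the failfast-safe head of a failed request, as reported by blk_rq_err_bytes(), would be failed immediately. The example_* helpers are hypothetical, the REQ_FAILFAST_MASK grouping is assumed from the block headers of this series, and locking and driver-specific requeue details are elided.

#include <linux/blkdev.h>
#include "blk.h"	/* for blk_rq_set_mixed_merge(), declared in the header below */

/* Merge time: if two requests disagree on their failfast flags, mark the
 * result so that per-bio failfast state is honoured when it fails. */
static void example_mark_if_mixed(struct request *req, struct request *next)
{
	if ((req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}
}

/* Completion time: fail only the leading bytes that share the first bio's
 * failfast setting; if blk_end_request() reports leftover work, the tail
 * still wants retries and must not be failed outright. */
static void example_fail_safely(struct request *rq, int error)
{
	unsigned int err_bytes = blk_rq_err_bytes(rq);

	if (!blk_end_request(rq, error, err_bytes))
		return;		/* the whole request could be failed safely */

	/* The remainder crosses into bios that require retries; a real
	 * driver would resubmit it through its normal requeue path here. */
}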
169 lines · 4.5 KiB · C
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
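
/*
 * Editor's sketch, not part of blk.h: the request timeout handler and the
 * normal completion path may both try to finish the same request.  Whichever
 * caller wins the test_and_set_bit() inside blk_mark_rq_complete() proceeds;
 * the loser simply backs off.  example_try_complete() is a hypothetical
 * wrapper mirroring that pattern.
 */
static inline void example_try_complete(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return;			/* the other path owns completion */
	__blk_complete_request(rq);	/* we won the race; finish the request */
}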

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
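
/*
 * Editor's sketch, not part of blk.h: nr_congestion_on is slightly higher
 * than nr_congestion_off, so the queue does not flap between the congested
 * and uncongested states on every request allocation or free.  A caller
 * that tracks the number of requests in use could apply the thresholds
 * roughly like this; example_update_congestion() is hypothetical.
 */
static inline void example_update_congestion(struct request_queue *q,
					     int nr_used, int sync)
{
	if (nr_used >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, sync);
	else if (nr_used < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);
}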

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define rq_for_each_integrity_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)

#endif /* BLK_DEV_INTEGRITY */

static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && blk_rq_io_stat(rq) &&
	       (blk_fs_request(rq) || blk_discard_rq(rq));
}

#endif