block: support different tag allocation policy
The libata tag allocation uses a round-robin policy. The next patch will
switch libata over to the block layer's generic tag allocation, so add an
allocation policy to the tag code. Two policies exist for now: FIFO (the
default) and round-robin.

Cc: Jens Axboe <axboe@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit ee1b6f7aff
parent bb5c3cdda3
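To make the behavioral difference concrete before the diff, here is a minimal userspace sketch (illustration only, not part of the patch): FIFO always reuses the lowest free tag, while round-robin advances a cursor so a just-freed tag is not immediately handed out again, which is the behavior the existing libata allocator provides. The bitmap loops stand in for the kernel's find_first_zero_bit()/find_next_zero_bit() helpers.

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_DEPTH 8

	static bool map[MAX_DEPTH];	/* true = tag in use */
	static int next_tag;		/* round-robin cursor, as in the patch */

	/* BLK_TAG_ALLOC_FIFO: always scan from bit 0 */
	static int alloc_fifo(void)
	{
		for (int t = 0; t < MAX_DEPTH; t++) {
			if (!map[t]) {
				map[t] = true;
				return t;
			}
		}
		return -1;	/* no free tag */
	}

	/* BLK_TAG_ALLOC_RR: scan from the cursor, wrapping around */
	static int alloc_rr(void)
	{
		for (int i = 0; i < MAX_DEPTH; i++) {
			int t = (next_tag + i) % MAX_DEPTH;
			if (!map[t]) {
				map[t] = true;
				next_tag = (t + 1) % MAX_DEPTH;
				return t;
			}
		}
		return -1;
	}

	int main(void)
	{
		int a = alloc_rr();
		int b = alloc_rr();
		printf("rr:   %d %d\n", a, b);	/* 0 1 */

		map[a] = map[b] = false;	/* both requests complete */

		a = alloc_rr();
		b = alloc_rr();
		printf("rr:   %d %d\n", a, b);	/* 2 3: cursor moved past 0/1 */

		printf("fifo: %d\n", alloc_fifo());	/* 0: lowest free tag again */
		return 0;
	}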
diff --git a/block/blk-tag.c b/block/blk-tag.c
@@ -119,7 +119,7 @@ fail:
 }
 
 static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-						   int depth)
+						   int depth, int alloc_policy)
 {
 	struct blk_queue_tag *tags;
 
@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 		goto fail;
 
 	atomic_set(&tags->refcnt, 1);
+	tags->alloc_policy = alloc_policy;
+	tags->next_tag = 0;
 	return tags;
 fail:
 	kfree(tags);
@@ -140,10 +142,11 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth:	the maximum queue depth supported
+ * @alloc_policy: tag allocation policy
  **/
-struct blk_queue_tag *blk_init_tags(int depth)
+struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
 {
-	return __blk_queue_init_tags(NULL, depth);
+	return __blk_queue_init_tags(NULL, depth, alloc_policy);
 }
 EXPORT_SYMBOL(blk_init_tags);
 
@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the tag to use
+ * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
-			struct blk_queue_tag *tags)
+			struct blk_queue_tag *tags, int alloc_policy)
 {
 	int rc;
 
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = __blk_queue_init_tags(q, depth);
+		tags = __blk_queue_init_tags(q, depth, alloc_policy);
 
 		if (!tags)
 			return -ENOMEM;
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	}
 
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, max_depth);
-		if (tag >= max_depth)
-			return 1;
+		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
+			tag = find_first_zero_bit(bqt->tag_map, max_depth);
+			if (tag >= max_depth)
+				return 1;
+		} else {
+			int start = bqt->next_tag;
+			int size = min_t(int, bqt->max_depth, max_depth + start);
+			tag = find_next_zero_bit(bqt->tag_map, size, start);
+			if (tag >= size && start + size > bqt->max_depth) {
+				size = start + size - bqt->max_depth;
+				tag = find_first_zero_bit(bqt->tag_map, size);
+			}
+			if (tag >= size)
+				return 1;
+		}
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * See blk_queue_end_tag for details.
 	 */
 
+	bqt->next_tag = (tag + 1) % bqt->max_depth;
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
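The round-robin branch is the subtle part, so a worked example for the common case where max_depth equals bqt->max_depth: with both equal to 32 and next_tag = 30, size = min_t(int, 32, 32 + 30) = 32, so the first search covers bits [30, 32). If both of those tags are in use, tag >= size and start + size = 62 > 32, so the wrap-around search covers bits [0, 30); together the two scans visit every tag exactly once, starting at the cursor. The min_t() clamp keeps the first search from running past the end of the real tag map when max_depth is temporarily lower than bqt->max_depth.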
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
@@ -423,7 +423,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
 	}
 
 	/* switch queue to TCQ mode; allocate tag map */
-	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
+	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL, BLK_TAG_ALLOC_FIFO);
 	if (rc) {
 		blk_cleanup_queue(q);
 		put_disk(disk);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
@@ -290,7 +290,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	if (!shost_use_blk_mq(sdev->host) &&
 	    (shost->bqt || shost->hostt->use_blk_tags)) {
 		blk_queue_init_tags(sdev->request_queue,
-				    sdev->host->cmd_per_lun, shost->bqt);
+				    sdev->host->cmd_per_lun, shost->bqt,
+				    shost->hostt->tag_alloc_policy);
 	}
 	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -272,7 +272,11 @@ struct blk_queue_tag {
 	int max_depth;			/* what we will send to device */
 	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
+	int alloc_policy;		/* tag allocation policy */
+	int next_tag;			/* next tag */
 };
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
 
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -1139,11 +1143,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
 extern void blk_queue_free_tags(struct request_queue *);
 extern int blk_queue_resize_tags(struct request_queue *, int);
 extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int);
+extern struct blk_queue_tag *blk_init_tags(int, int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
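For any caller of the exported interface not converted by this patch, the update is mechanical: pass BLK_TAG_ALLOC_FIFO to keep the pre-patch semantics. A hypothetical caller (demo_init_tags is invented for illustration, not from the patch):

	#include <linux/blkdev.h>
	#include <scsi/scsi_host.h>

	/* Before this patch the call was blk_init_tags(depth); the extra
	 * argument makes the policy explicit, and FIFO preserves the old
	 * allocation order at every converted call site. */
	static int demo_init_tags(struct Scsi_Host *shost, int depth)
	{
		shost->bqt = blk_init_tags(depth, BLK_TAG_ALLOC_FIFO);
		return shost->bqt ? 0 : -ENOMEM;
	}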
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
@@ -402,6 +402,9 @@ struct scsi_host_template {
 	 */
 	unsigned char present;
 
+	/* If use block layer to manage tags, this is tag allocation policy */
+	int tag_alloc_policy;
+
 	/*
 	 * Let the block layer assigns tags to all commands.
 	 */
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
@@ -66,7 +66,8 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
 	 * devices on the shared host (for libata)
 	 */
 	if (!shost->bqt) {
-		shost->bqt = blk_init_tags(depth);
+		shost->bqt = blk_init_tags(depth,
+			shost->hostt->tag_alloc_policy);
 		if (!shost->bqt)
 			return -ENOMEM;
 	}
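Finally, how a driver opts in: a hypothetical host template (sketch only; demo_sht and its values are invented, and the follow-up libata patch is the real user). Note that drivers that never touch the field get FIFO for free, since static templates zero-initialize and BLK_TAG_ALLOC_FIFO is 0.

	#include <linux/blkdev.h>
	#include <scsi/scsi_host.h>

	/* Hypothetical template: a host that wants libata-style
	 * round-robin tag allocation sets tag_alloc_policy. */
	static struct scsi_host_template demo_sht = {
		.name			= "demo",
		.can_queue		= 31,
		.use_blk_tags		= 1,	/* block layer manages tags */
		.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
	};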