Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-18 01:34:14 +08:00)
5e57dc8110
Pull block IO fixes from Jens Axboe:
 "Second round of updates and fixes for 3.14-rc2. Most of this stuff has
  been queued up for a while. The notable exception is the blk-mq changes,
  which are naturally a bit more in flux still.

  The pull request contains:

   - Two bug fixes for the new immutable vecs, causing crashes with raid
     or swap. From Kent.

   - Various blk-mq tweaks and fixes from Christoph. A fix for integrity
     bio's from Nic.

   - A few bcache fixes from Kent and Darrick Wong.

   - xen-blk{front,back} fixes from David Vrabel, Matt Rushton, Nicolas
     Swenson, and Roger Pau Monne.

   - Fix for a vec miscount with integrity vectors from Martin.

   - Minor annotations or fixes from Masanari Iida and Rashika Kheria.

   - Tweak to null_blk to do more normal FIFO processing of requests from
     Shlomo Pongratz.

   - Elevator switching bypass fix from Tejun.

   - Softlockup in blkdev_issue_discard() fix when !CONFIG_PREEMPT from me"

* 'for-linus' of git://git.kernel.dk/linux-block: (31 commits)
  block: add cond_resched() to potentially long running ioctl discard loop
  xen-blkback: init persistent_purge_work work_struct
  blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
  blk-mq: dont assume rq->errors is set when returning an error from ->queue_rq
  block: Fix cloning of discard/write same bios
  block: Fix type mismatch in ssize_t_blk_mq_tag_sysfs_show
  blk-mq: rework flush sequencing logic
  null_blk: use blk_complete_request and blk_mq_complete_request
  virtio_blk: use blk_mq_complete_request
  blk-mq: rework I/O completions
  fs: Add prototype declaration to appropriate header file include/linux/bio.h
  fs: Mark function as static in fs/bio-integrity.c
  block/null_blk: Fix completion processing from LIFO to FIFO
  block: Explicitly handle discard/write same segments
  block: Fix nr_vecs for inline integrity vectors
  blk-mq: Add bio_integrity setup to blk_mq_make_request
  blk-mq: initialize sg_reserved_size
  blk-mq: handle dma_drain_size
  blk-mq: divert __blk_put_request for MQ ops
  blk-mq: support at_head inserations for blk_execute_rq
  ...
207 lines
4.7 KiB
C
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Per tagged queue (tag address space) map
 */
struct blk_mq_tags {
	unsigned int nr_tags;
	unsigned int nr_reserved_tags;
	unsigned int nr_batch_move;
	unsigned int nr_max_cache;

	struct percpu_ida free_tags;
	struct percpu_ida reserved_tags;
};

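/*
 * Block until at least one tag is available: allocate a normal tag with
 * __GFP_WAIT (which sleeps until one frees up) and release it again.
 */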
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
	int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
	blk_mq_put_tag(tags, tag);
}

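/*
 * Return true if no tag map is attached, or if the normal (non-reserved)
 * pool still has free tags.
 */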
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	return !tags ||
		percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}

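/*
 * Allocate a tag from the normal pool. Sleeps when __GFP_WAIT is set,
 * otherwise fails with BLK_MQ_TAG_FAIL. The result is offset by
 * nr_reserved_tags so reserved and normal tags share one number space.
 */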
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
	int tag;

	tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag + tags->nr_reserved_tags;
}

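/*
 * Allocate a tag from the reserved pool. Warns and fails if the map was
 * created without reserved tags.
 */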
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
					      gfp_t gfp)
{
	int tag;

	if (unlikely(!tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
			       TASK_UNINTERRUPTIBLE : TASK_RUNNING);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;
	return tag;
}

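/*
 * Allocate either a normal or a reserved tag, depending on @reserved.
 */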
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
	if (!reserved)
		return __blk_mq_get_tag(tags, gfp);

	return __blk_mq_get_reserved_tag(tags, gfp);
}

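/*
 * Return a normal tag to the free pool, undoing the nr_reserved_tags offset
 * applied by __blk_mq_get_tag(). The reserved variant below frees into the
 * reserved pool without any offset.
 */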
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	percpu_ida_free(&tags->reserved_tags, tag);
}

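/*
 * Free a tag: values below nr_reserved_tags came from the reserved pool,
 * everything else from the normal pool.
 */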
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag >= tags->nr_reserved_tags)
		__blk_mq_put_tag(tags, tag);
	else
		__blk_mq_put_reserved_tag(tags, tag);
}

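/*
 * Callback for percpu_ida_for_each_free(): record one free tag id in the
 * bitmap passed via @data.
 */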
static int __blk_mq_tag_iter(unsigned id, void *data)
{
	unsigned long *tag_map = data;
	__set_bit(id, tag_map);
	return 0;
}

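/*
 * Build a bitmap of all currently free tags (normal and reserved) and pass
 * it to @fn; bits left clear are presumably the busy tags the caller is
 * interested in. The bitmap is allocated with GFP_ATOMIC, so the callback
 * is silently skipped if that allocation fails.
 */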
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
	if (tags->nr_reserved_tags)
		percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
			tag_map);

	fn(data, tag_map);
	kfree(tag_map);
}

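/*
 * Allocate a tag map with @total_tags tags, @reserved_tags of which live in
 * a separate reserved pool. The per-cpu cache and batch sizes of the normal
 * pool are derived from the tag count and clamped to
 * [BLK_MQ_TAG_CACHE_MIN, BLK_MQ_TAG_CACHE_MAX]. Returns NULL on failure.
 *
 * Illustrative lifecycle (a sketch, not code from this file):
 *
 *	tags = blk_mq_init_tags(depth, reserved, node);
 *	tag  = blk_mq_get_tag(tags, GFP_KERNEL, false);
 *	...
 *	blk_mq_put_tag(tags, tag);
 *	blk_mq_free_tags(tags);
 */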
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	unsigned int nr_tags, nr_cache;
	struct blk_mq_tags *tags;
	int ret;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	nr_tags = total_tags - reserved_tags;
	nr_cache = nr_tags / num_possible_cpus();

	if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
		nr_cache = BLK_MQ_TAG_CACHE_MIN;
	else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
		nr_cache = BLK_MQ_TAG_CACHE_MAX;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	tags->nr_max_cache = nr_cache;
	tags->nr_batch_move = max(1u, nr_cache / 2);

	ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
				tags->nr_reserved_tags,
				tags->nr_max_cache,
				tags->nr_batch_move);
	if (ret)
		goto err_free_tags;

	if (reserved_tags) {
		/*
		 * With max_cache and batch set to 1, the allocator falls back
		 * to no caching. It's fine if reserved tags allocation is
		 * slow.
		 */
		ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
				1, 1);
		if (ret)
			goto err_reserved_tags;
	}

	return tags;

err_reserved_tags:
	percpu_ida_destroy(&tags->free_tags);
err_free_tags:
	kfree(tags);
	return NULL;
}

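/*
 * Tear down both percpu_ida pools and free the tag map itself.
 */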
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	percpu_ida_destroy(&tags->free_tags);
	percpu_ida_destroy(&tags->reserved_tags);
	kfree(tags);
}

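/*
 * Format the tag map configuration and the global and per-cpu free counts
 * into @page for sysfs. Returns the number of bytes written.
 */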
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int cpu;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
			" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
			tags->nr_batch_move, tags->nr_max_cache);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
			percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
			percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));

	for_each_possible_cpu(cpu) {
		page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu,
				percpu_ida_free_tags(&tags->free_tags, cpu));
	}

	return page - orig_page;
}