Merge branch 'for-3.16/core' into for-3.16/drivers

Pull in core changes (again), since we got rid of the alloc/free hctx
mq_ops hooks and mtip32xx then needed updating again.

Signed-off-by: Jens Axboe <axboe@fb.com>

commit 0fb662e225
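The shape of the change, as a minimal sketch in 3.16-era blk-mq terms (the mydrv_* names are hypothetical; the ops fields and library helpers shown mirror the hunks below): drivers drop the .alloc_hctx/.free_hctx hooks from their blk_mq_ops, and blk-mq core allocates each hardware context itself with kzalloc_node().

/* Before this merge: a driver had to wire up hctx allocation hooks,
 * typically pointing at the library single-queue helpers.
 * mydrv_queue_rq is a hypothetical driver callback. */
static struct blk_mq_ops mydrv_mq_ops_old = {
	.queue_rq	= mydrv_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,	/* removed by this merge */
	.free_hctx	= blk_mq_free_single_hw_queue,	/* removed by this merge */
};

/* After: the hooks are gone; blk_mq_init_queue() kzalloc_node()s each
 * hctx on the right NUMA node, so the ops table shrinks to the real
 * driver callbacks (see the mtip32xx, null_blk and virtio_blk hunks). */
static struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,
	.map_queue	= blk_mq_map_queue,
};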
@@ -1,3 +1,8 @@
+/*
+ * CPU notifier helper code for blk-mq
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -1,3 +1,8 @@
+/*
+ * CPU <-> hardware queue mapping helpers
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
 #include <linux/kernel.h>
 #include <linux/threads.h>
 #include <linux/module.h>
@@ -1,3 +1,15 @@
+/*
+ * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
+ * over multiple cachelines to avoid ping-pong between multiple submitters
+ * or submitter and completer. Uses rolling wakeups to avoid falling of
+ * the scaling cliff when we run out of tags and have to start putting
+ * submitters to sleep.
+ *
+ * Uses active queue tracking to support fairer distribution of tags
+ * between multiple submitters when a shared tag map is used.
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/random.h>
@@ -1,3 +1,9 @@
+/*
+ * Block multiqueue core code
+ *
+ * Copyright (C) 2013-2014 Jens Axboe
+ * Copyright (C) 2013-2014 Christoph Hellwig
+ */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -1329,21 +1335,6 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
 }
 EXPORT_SYMBOL(blk_mq_map_queue);
 
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
-						    unsigned int hctx_index,
-						    int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
-				 unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 		struct blk_mq_tags *tags, unsigned int hctx_idx)
 {
@@ -1584,7 +1575,7 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		free_cpumask_var(hctx->cpumask);
-		set->ops->free_hctx(hctx, i);
+		kfree(hctx);
 	}
 }
 
@@ -1805,7 +1796,8 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int node = blk_mq_hw_queue_to_node(map, i);
 
-		hctxs[i] = set->ops->alloc_hctx(set, i, node);
+		hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+					GFP_KERNEL, node);
 		if (!hctxs[i])
 			goto err_hctxs;
 
@@ -1892,7 +1884,7 @@ err_hctxs:
 		if (!hctxs[i])
 			break;
 		free_cpumask_var(hctxs[i]->cpumask);
-		set->ops->free_hctx(hctxs[i], i);
+		kfree(hctxs[i]);
 	}
 err_map:
 	kfree(hctxs);
@@ -1977,9 +1969,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues ||
-	    !set->ops->queue_rq || !set->ops->map_queue ||
-	    !set->ops->alloc_hctx || !set->ops->free_hctx)
+	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
@@ -3832,8 +3832,6 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
 static struct blk_mq_ops mtip_mq_ops = {
 	.queue_rq	= mtip_queue_rq,
 	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
 	.init_request	= mtip_init_cmd,
 	.exit_request	= mtip_free_cmd,
 };
@@ -321,18 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
-					     unsigned int hctx_index,
-					     int node)
-{
-	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
-	kfree(hctx);
-}
-
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
 {
 	BUG_ON(!nullb);
@@ -360,17 +348,6 @@ static struct blk_mq_ops null_mq_ops = {
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= null_init_hctx,
 	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
-};
-
-static struct blk_mq_ops null_mq_ops_pernode = {
-	.queue_rq	= null_queue_rq,
-	.map_queue	= blk_mq_map_queue,
-	.init_hctx	= null_init_hctx,
-	.complete	= null_softirq_done_fn,
-	.alloc_hctx	= null_alloc_hctx,
-	.free_hctx	= null_free_hctx,
 };
 
 static void null_del_dev(struct nullb *nullb)
@@ -496,10 +473,7 @@ static int null_add_dev(void)
 		goto out_free_nullb;
 
 	if (queue_mode == NULL_Q_MQ) {
-		if (use_per_node_hctx)
-			nullb->tag_set.ops = &null_mq_ops_pernode;
-		else
-			nullb->tag_set.ops = &null_mq_ops;
+		nullb->tag_set.ops = &null_mq_ops;
 		nullb->tag_set.nr_hw_queues = submit_queues;
 		nullb->tag_set.queue_depth = hw_queue_depth;
 		nullb->tag_set.numa_node = home_node;
@@ -497,8 +497,6 @@ static int virtblk_init_request(void *data, struct request *rq,
 static struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.map_queue	= blk_mq_map_queue,
-	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
-	.free_hctx	= blk_mq_free_single_hw_queue,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
 };
@@ -79,9 +79,6 @@ struct blk_mq_tag_set {
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
-		unsigned int, int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -107,12 +104,6 @@ struct blk_mq_ops {
 
 	softirq_done_fn		*complete;
 
-	/*
-	 * Override for hctx allocations (should probably go)
-	 */
-	alloc_hctx_fn		*alloc_hctx;
-	free_hctx_fn		*free_hctx;
-
 	/*
 	 * Called when the block layer side of a hardware queue has been
 	 * set up, allowing the driver to allocate/init matching structures.
@@ -166,7 +157,6 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);