block-6.5-2023-07-21
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmS629wQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpv/7D/99ysE5ZszmjxNOmyy1lGfqtQnaTLuToRsl
wB16umIPAFfye5r4TV8l9GZuUyI7FU8LySglu0Y0qMKmCp+kJKLh90kB281Co4Dn
yp1AbqlTorAlG4ElQJBRaQr4kaqqvI2tzeVmFdUhIE1oX2e9OX/O+YKa8k1JfsKI
oecChQgodlPxX3wusItgiyvZKl2q2+mivg5E6cqiGIgP3uF8fmOQCbio4Vm8ZSxb
TO8JEfBTiXslR+CvJD3Gi96pzexN1qCUed8/7FDiIUufhETmwqSIOo89GxzGAQ6O
7o/83IkqgXPHjKLYs3R4/jhHPXZmXmvDZHWIiSg+KLOFqxxWmRPNJ6V6igIBP8SG
eu5PTA7SDGtvIXePpu38FTPmSiUW7MbGhnjqY8u64Je6MaQ8l28KN7xkFtmxV+n4
hgB0gr6uKBnXMKZHobk0yJeUUI/L/0ESzbVPDHY8JM/rQCsp1eSNQDpZoVjPWZmg
lMGYmOq57oPA20LVch7U3gUFhD4CJ7c3e2/EzJdJVjsTveTYieBCEESQErFbMcEr
VuRZSAGnPyXQ4yF4wG93x4sDye28ZFS/Q9c6Q3DCUxctDkCz4eY1+vmdX+NJXwDA
aYXCyyKzk18udbKvV0QvTuDTb6PrJDPxbFagCveibPTtP4XDMv1LvpdZPUPJ/HGX
4xA1mrsGJA==
=e2OR
-----END PGP SIGNATURE-----

Merge tag 'block-6.5-2023-07-21' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Fix for loop regressions (Mauricio)
 - Fix a potential stall with batched wakeups in sbitmap (David)
 - Fix for stall with recursive plug flushes (Ross)
 - Skip accounting of empty requests for blk-iocost (Chengming)
 - Remove a dead field in struct blk_mq_hw_ctx (Chengming)

* tag 'block-6.5-2023-07-21' of git://git.kernel.dk/linux:
  loop: do not enforce max_loop hard limit by (new) default
  loop: deprecate autoloading callback loop_probe()
  sbitmap: fix batching wakeup
  blk-iocost: skip empty flush bio in iocost
  blk-mq: delete dead struct blk_mq_hw_ctx->queued field
  blk-mq: Fix stall due to recursive flush plug
commit f036d67c02
block/blk-core.c:

@@ -1144,8 +1144,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
 	if (!list_empty(&plug->cb_list))
 		flush_plug_callbacks(plug, from_schedule);
-	if (!rq_list_empty(plug->mq_list))
-		blk_mq_flush_plug_list(plug, from_schedule);
+	blk_mq_flush_plug_list(plug, from_schedule);
 	/*
 	 * Unconditionally flush out cached requests, even if the unplug
 	 * event came from schedule. Since we know hold references to the
block/blk-iocost.c:

@@ -2516,6 +2516,10 @@ static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
 	u64 seek_pages = 0;
 	u64 cost = 0;
 
+	/* Can't calculate cost for empty bio */
+	if (!bio->bi_iter.bi_size)
+		goto out;
+
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
 		coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
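The guard bails out of cost calculation before anything is charged for a bio that carries no data: a bare flush has bi_iter.bi_size == 0, so it should not accrue seek or page cost. A minimal userspace model of that guard follows; struct bio_model, vtime_cost(), and the page-cost arithmetic are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct bio: only the size matters here. */
struct bio_model {
	uint32_t bi_size;	/* bytes carried by the bio; 0 for a bare flush */
};

/* Model of the new guard: an empty (flush-only) bio is charged nothing. */
static uint64_t vtime_cost(const struct bio_model *bio)
{
	/* Can't calculate cost for empty bio */
	if (!bio->bi_size)
		return 0;

	/* Toy cost: proportional to 4 KiB pages touched. */
	return (((uint64_t)bio->bi_size + 4095) / 4096) * 100;
}

int main(void)
{
	struct bio_model flush = { .bi_size = 0 };
	struct bio_model write = { .bi_size = 8192 };

	printf("flush cost: %llu\n", (unsigned long long)vtime_cost(&flush));	/* 0 */
	printf("write cost: %llu\n", (unsigned long long)vtime_cost(&write));	/* 200 */
	return 0;
}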
block/blk-mq.c:

@@ -2754,7 +2754,14 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request *rq;
 
-	if (rq_list_empty(plug->mq_list))
+	/*
+	 * We may have been called recursively midway through handling
+	 * plug->mq_list via a schedule() in the driver's queue_rq() callback.
+	 * To avoid mq_list changing under our feet, clear rq_count early and
+	 * bail out specifically if rq_count is 0 rather than checking
+	 * whether the mq_list is empty.
+	 */
+	if (plug->rq_count == 0)
 		return;
 	plug->rq_count = 0;
 
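The comment captures the subtle part: the guard now tests rq_count rather than list emptiness, and rq_count is cleared before the list walk begins, so a recursive call (a schedule() out of the driver's ->queue_rq()) returns immediately instead of re-walking a list that is mid-consumption. A userspace sketch of that pattern; struct plug, flush(), and handle() are illustrative stand-ins for the kernel structures, not kernel API.

#include <stdio.h>

/* Illustrative plug: a counter plus a singly linked request list. */
struct req { struct req *next; };
struct plug { struct req *list; int count; };

static void handle(struct plug *plug, struct req *rq);

/*
 * Re-entrancy-safe flush, mirroring the rq_count trick: clear the
 * counter before walking the list, so a recursive call returns
 * immediately instead of consuming a list we are already walking.
 */
static void flush(struct plug *plug)
{
	if (plug->count == 0)
		return;
	plug->count = 0;

	while (plug->list) {
		struct req *rq = plug->list;

		plug->list = rq->next;
		handle(plug, rq);
	}
}

static void handle(struct plug *plug, struct req *rq)
{
	(void)rq;
	/* Like a schedule() out of ->queue_rq(): re-enter the flush. */
	flush(plug);	/* bails out: count is already 0 */
}

int main(void)
{
	struct req r2 = { NULL };
	struct req r1 = { &r2 };
	struct plug plug = { &r1, 2 };

	flush(&plug);
	printf("drained: %s\n", plug.list ? "no" : "yes");	/* yes */
	return 0;
}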
drivers/block/loop.c:

@@ -1775,14 +1775,43 @@ static const struct block_device_operations lo_fops = {
 /*
  * If max_loop is specified, create that many devices upfront.
  * This also becomes a hard limit. If max_loop is not specified,
+ * the default isn't a hard limit (as before commit 85c50197716c
+ * changed the default value from 0 for max_loop=0 reasons), just
  * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
  * init time. Loop devices can be requested on-demand with the
  * /dev/loop-control interface, or be instantiated by accessing
  * a 'dead' device node.
  */
 static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
-module_param(max_loop, int, 0444);
+
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+static bool max_loop_specified;
+
+static int max_loop_param_set_int(const char *val,
+				  const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_set_int(val, kp);
+	if (ret < 0)
+		return ret;
+
+	max_loop_specified = true;
+	return 0;
+}
+
+static const struct kernel_param_ops max_loop_param_ops = {
+	.set = max_loop_param_set_int,
+	.get = param_get_int,
+};
+
+module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
+#else
+module_param(max_loop, int, 0444);
+MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
+#endif
+
 module_param(max_part, int, 0444);
 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
 
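The kernel_param_ops indirection exists only so the module can tell whether the user passed max_loop at all: param_set_int() still does the actual parsing, and the .set wrapper records the fact in max_loop_specified. The same pattern suits any module parameter that needs "was this explicitly set?" detection; a minimal sketch, where my_param and its helpers are illustrative names rather than loop driver code:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_param = 16;		/* built-in default */
static bool my_param_specified;		/* becomes true only on explicit set */

static int my_param_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);	/* reuse normal int parsing */

	if (ret < 0)
		return ret;
	my_param_specified = true;		/* remember the explicit set */
	return 0;
}

static const struct kernel_param_ops my_param_ops = {
	.set = my_param_set,
	.get = param_get_int,
};

module_param_cb(my_param, &my_param_ops, &my_param, 0444);
MODULE_PARM_DESC(my_param, "Example parameter that records whether it was set");
MODULE_LICENSE("GPL");

The loop_probe() hunk below is where the recorded flag actually changes behavior.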
drivers/block/loop.c:

@@ -2093,14 +2122,18 @@ static void loop_remove(struct loop_device *lo)
 	put_disk(lo->lo_disk);
 }
 
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
 static void loop_probe(dev_t dev)
 {
 	int idx = MINOR(dev) >> part_shift;
 
-	if (max_loop && idx >= max_loop)
+	if (max_loop_specified && max_loop && idx >= max_loop)
 		return;
 	loop_add(idx);
 }
+#else
+#define loop_probe NULL
+#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */
 
 static int loop_control_remove(int idx)
 {
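The behavioral change in the gate is easiest to see side by side: with the built-in default, probing a minor past max_loop is still allowed; only an explicitly passed max_loop= acts as a hard limit. A small userspace model, where probe_allowed() is an illustrative stand-in for the check inside loop_probe() and the default value is made up:

#include <stdbool.h>
#include <stdio.h>

static int max_loop = 8;		/* config default, illustrative value */
static bool max_loop_specified;		/* set only by an explicit max_loop= */

/* Illustrative stand-in for the new check in loop_probe(). */
static bool probe_allowed(int idx)
{
	if (max_loop_specified && max_loop && idx >= max_loop)
		return false;	/* an explicit max_loop is a hard limit */
	return true;		/* the built-in default is not */
}

int main(void)
{
	printf("idx 12, default max_loop:  %d\n", probe_allowed(12));	/* 1 */
	max_loop_specified = true;
	printf("idx 12, explicit max_loop: %d\n", probe_allowed(12));	/* 0 */
	return 0;
}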
drivers/block/loop.c:

@@ -2281,6 +2314,9 @@ module_exit(loop_exit);
 static int __init max_loop_setup(char *str)
 {
 	max_loop = simple_strtol(str, NULL, 0);
+#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
+	max_loop_specified = true;
+#endif
 	return 1;
 }
 
include/linux/blk-mq.h:

@@ -397,8 +397,6 @@ struct blk_mq_hw_ctx {
 	 */
 	struct blk_mq_tags *sched_tags;
 
-	/** @queued: Number of queued requests. */
-	unsigned long queued;
 	/** @run: Number of dispatched requests. */
 	unsigned long run;
 
lib/sbitmap.c:

@@ -550,7 +550,7 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
 
 static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 {
-	int i, wake_index;
+	int i, wake_index, woken;
 
 	if (!atomic_read(&sbq->ws_active))
 		return;
@@ -567,13 +567,12 @@ static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
 		 */
 		wake_index = sbq_index_inc(wake_index);
 
-		/*
-		 * It is sufficient to wake up at least one waiter to
-		 * guarantee forward progress.
-		 */
-		if (waitqueue_active(&ws->wait) &&
-		    wake_up_nr(&ws->wait, nr))
-			break;
+		if (waitqueue_active(&ws->wait)) {
+			woken = wake_up_nr(&ws->wait, nr);
+			if (woken == nr)
+				break;
+			nr -= woken;
+		}
 	}
 
 	if (wake_index != atomic_read(&sbq->wake_index))
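The bug this fixes: with batched completions, the old loop stopped after the first active wait queue even if that queue held fewer than nr waiters, so the surplus wakeups were lost and waiters parked on other queues could stall. The rewritten loop charges each queue for the waiters it actually woke and carries the remainder onward. A userspace model of that accounting, where wake_queue() stands in for wake_up_nr() and the waiter counts are illustrative:

#include <stdio.h>

#define NUM_QUEUES 8

/* Stand-in for wake_up_nr(): wake up to nr of the waiters parked
 * on queue i and return how many were actually woken. */
static int wake_queue(int *waiters, int i, int nr)
{
	int woken = waiters[i] < nr ? waiters[i] : nr;

	waiters[i] -= woken;
	return woken;
}

/* Model of the fixed __sbitmap_queue_wake_up(): carry the unserved
 * remainder of the batch to the next queue instead of stopping at
 * the first queue that woke anyone. */
static void wake_up_batch(int *waiters, int nr)
{
	int i, woken;

	for (i = 0; i < NUM_QUEUES && nr > 0; i++) {
		if (!waiters[i])
			continue;	/* waitqueue_active() == false */
		woken = wake_queue(waiters, i, nr);
		if (woken == nr)
			return;		/* whole batch served */
		nr -= woken;		/* keep going with the rest */
	}
}

int main(void)
{
	/* One waiter on queue 0, three on queue 5. */
	int waiters[NUM_QUEUES] = { 1, 0, 0, 0, 0, 3, 0, 0 };

	wake_up_batch(waiters, 4);	/* a batch of 4 completions */
	printf("left on queue 5: %d\n", waiters[5]);	/* 0, not 3 */
	return 0;
}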