block-6.2-2022-12-19

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmOgp5AQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpm5SD/9tduSZQW00aDm83HbEikWdCgQm0w37tyYl
 C2+IwRwLF8pnAoSb6yaO7LZM9ZUYfoIfIlkHXkKhT1xNJ/XdeGDgwjOHi106iaEx
 kG08DcFnUjyJ4Yh6hnnpnSepIo0ckwa18pSaE4smvmKZirj3it3O6xSspyBxtUcv
 q6PvJDMN15aG6uLHq3xNZPzoI2KYXBDgwanyImRhdvLoOTiS9rok+F9e2ob3lzAa
 PB+FOipQoKb7M6jbyfZe4KbeTiJh4EYEl5Qa6ebrDIkOTm7zjc8sQbCkNeI7osh+
 D0FvEQ1Vsrjj5Bp6N9CmZcrmNagjEcAPbzguxAilrgw2/XvA8d0fymziGXvuyUEv
 bSAx6lyJzfMLrvtubSqMhIF+8DlccQnnXz2ccacwvAfayytzNJjC9serU+czHA4O
 ZkPTwZFjAmbn6q6SK3qaOCB9IgITHipj8R/ncGu9KjNvM2QgzM+OIrP0xGxtk6uI
 ZGrt9nGMUmgjtaliQjiDVZomMewru1lRWPRAjfQ995gmVkejgapUHYoaDtDzaLKZ
 Q9BaK5CC2jltGUuuoFEnXnwu/Eyvp9y++pKkz4Esb+/Wkst4qyGtr9DOSTnv1wKN
 W20h3Z5vOAXXquvUJ5S3mQl8TNJHiBz+/CRB9PZG8XFtn8ubGo8XttGdgjQgyLM3
 6FHzcZgeWw==
 =TSec
 -----END PGP SIGNATURE-----

Merge tag 'block-6.2-2022-12-19' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - Various fixes for BFQ (Yu, Yuwei)

 - Fix for loop command line parsing (Isaac)

 - No need to specifically clear REQ_ALLOC_CACHE on IOPOLL downgrade
   anymore (me)

 - blk-iocost enum fix for newer gcc (Jiri)

 - UAF fix for queue release (Ming)

 - blk-iolatency error handling memory leak fix (Tejun)

* tag 'block-6.2-2022-12-19' of git://git.kernel.dk/linux:
  block: don't clear REQ_ALLOC_CACHE for non-polled requests
  block: fix use-after-free of q->q_usage_counter
  block, bfq: only do counting of pending-request for BFQ_GROUP_IOSCHED
  blk-iolatency: Fix memory leak on add_disk() failures
  loop: Fix the max_loop commandline argument treatment when it is set to 0
  block/blk-iocost (gcc13): keep large values in a new enum
  block, bfq: replace 0/1 with false/true in bic apis
  block, bfq: don't return bfqg from __bfq_bic_change_cgroup()
  block, bfq: fix possible uaf for 'bfqq->bic'
Commit: 569c3a283c
Author: Linus Torvalds
Date:   2022-12-21 16:35:26 -08:00
9 changed files with 47 additions and 38 deletions

block/bfq-cgroup.c

@@ -724,19 +724,19 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* sure that the reference to cgroup is valid across the call (see
* comments in bfq_bic_update_cgroup on this issue)
*/
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
struct bfq_io_cq *bic,
struct bfq_group *bfqg)
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
struct bfq_io_cq *bic,
struct bfq_group *bfqg)
{
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
struct bfq_entity *entity;
if (async_bfqq) {
entity = &async_bfqq->entity;
if (entity->sched_data != &bfqg->sched_data) {
bic_set_bfqq(bic, NULL, 0);
bic_set_bfqq(bic, NULL, false);
bfq_release_process_ref(bfqd, async_bfqq);
}
}
@@ -772,12 +772,10 @@ static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
*/
bfq_put_cooperator(sync_bfqq);
bfq_release_process_ref(bfqd, sync_bfqq);
bic_set_bfqq(bic, NULL, 1);
bic_set_bfqq(bic, NULL, true);
}
}
}
return bfqg;
}
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)

block/bfq-iosched.c

@@ -386,6 +386,12 @@ static void bfq_put_stable_ref(struct bfq_queue *bfqq);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
struct bfq_queue *old_bfqq = bic->bfqq[is_sync];
/* Clear bic pointer if bfqq is detached from this bic */
if (old_bfqq && old_bfqq->bic == bic)
old_bfqq->bic = NULL;
/*
* If bfqq != NULL, then a non-stable queue merge between
* bic->bfqq and bfqq is happening here. This causes troubles
@@ -3108,7 +3114,7 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
/*
* Merge queues (that is, let bic redirect its requests to new_bfqq)
*/
bic_set_bfqq(bic, new_bfqq, 1);
bic_set_bfqq(bic, new_bfqq, true);
bfq_mark_bfqq_coop(new_bfqq);
/*
* new_bfqq now belongs to at least two bics (it is a shared queue):
@@ -5311,7 +5317,6 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
bfqq->bic = NULL;
bfq_exit_bfqq(bfqd, bfqq);
bic_set_bfqq(bic, NULL, is_sync);
spin_unlock_irqrestore(&bfqd->lock, flags);
@@ -6557,7 +6562,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
return bfqq;
}
bic_set_bfqq(bic, NULL, 1);
bic_set_bfqq(bic, NULL, true);
bfq_put_cooperator(bfqq);
@@ -7058,7 +7063,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->queue_weights_tree = RB_ROOT_CACHED;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
bfqd->num_groups_with_pending_reqs = 0;
#endif
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
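
The 'bfqq->bic' use-after-free fix above comes down to a back-pointer invariant: once a bfq_queue is detached from a bfq_io_cq, any pointer the queue still holds back to that bfq_io_cq must be cleared at the same spot, otherwise a later path can follow it into memory that has since been freed. A minimal sketch of that invariant, using hypothetical stand-in types rather than the real BFQ structures:

    struct ctx;                         /* stands in for bfq_io_cq */

    struct queue {                      /* stands in for bfq_queue */
        struct ctx *owner;              /* back-pointer that can dangle */
    };

    struct ctx {
        struct queue *q[2];             /* stands in for bic->bfqq[] */
    };

    static void ctx_set_queue(struct ctx *c, struct queue *newq, bool is_sync)
    {
        struct queue *oldq = c->q[is_sync];

        /*
         * Clear the back-pointer while both objects are still valid;
         * clearing it only on some detach paths is what left a stale
         * pointer behind.
         */
        if (oldq && oldq->owner == c)
            oldq->owner = NULL;

        c->q[is_sync] = newq;
    }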

block/bfq-iosched.h

@@ -197,8 +197,10 @@ struct bfq_entity {
/* flag, set to request a weight, ioprio or ioprio_class change */
int prio_changed;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
/* flag, set if the entity is counted in groups_with_pending_reqs */
bool in_groups_with_pending_reqs;
#endif
/* last child queue of entity created (for non-leaf entities) */
struct bfq_queue *last_bfqq_created;
@@ -491,6 +493,7 @@ struct bfq_data {
*/
struct rb_root_cached queue_weights_tree;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
/*
* Number of groups with at least one process that
* has at least one request waiting for completion. Note that
@@ -538,6 +541,7 @@ struct bfq_data {
* with no request waiting for completion.
*/
unsigned int num_groups_with_pending_reqs;
#endif
/*
* Per-class (RT, BE, IDLE) number of bfq_queues containing

block/bfq-wf2q.c

@@ -1612,28 +1612,28 @@ void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
void bfq_add_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
{
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_entity *entity = &bfqq->entity;
if (!entity->in_groups_with_pending_reqs) {
entity->in_groups_with_pending_reqs = true;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!(bfqq_group(bfqq)->num_queues_with_pending_reqs++))
bfqq->bfqd->num_groups_with_pending_reqs++;
#endif
}
#endif
}
void bfq_del_bfqq_in_groups_with_pending_reqs(struct bfq_queue *bfqq)
{
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_entity *entity = &bfqq->entity;
if (entity->in_groups_with_pending_reqs) {
entity->in_groups_with_pending_reqs = false;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!(--bfqq_group(bfqq)->num_queues_with_pending_reqs))
bfqq->bfqd->num_groups_with_pending_reqs--;
#endif
}
#endif
}
/*

block/blk-cgroup.c

@@ -33,6 +33,7 @@
#include "blk-cgroup.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"
#include "blk-rq-qos.h"
/*
* blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
@@ -1322,6 +1323,7 @@ err_unlock:
void blkcg_exit_disk(struct gendisk *disk)
{
blkg_destroy_all(disk);
rq_qos_exit(disk->queue);
blk_throtl_exit(disk);
}

block/blk-core.c

@@ -254,14 +254,15 @@ EXPORT_SYMBOL_GPL(blk_clear_pm_only);
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
kmem_cache_free(blk_requestq_cachep,
container_of(rcu_head, struct request_queue, rcu_head));
struct request_queue *q = container_of(rcu_head,
struct request_queue, rcu_head);
percpu_ref_exit(&q->q_usage_counter);
kmem_cache_free(blk_requestq_cachep, q);
}
static void blk_free_queue(struct request_queue *q)
{
percpu_ref_exit(&q->q_usage_counter);
if (q->poll_stat)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
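
Some context on the q->q_usage_counter use-after-free above: the request queue is freed through call_rcu(), and a concurrent reader may still touch q->q_usage_counter until the grace period has elapsed, so the percpu ref has to be torn down inside the RCU callback rather than before the callback is queued. A minimal sketch of that ordering, with a hypothetical 'foo' object instead of struct request_queue:

    struct foo {
        struct percpu_ref ref;
        struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
        struct foo *f = container_of(head, struct foo, rcu);

        /* Only safe here: no RCU reader can still observe 'f'. */
        percpu_ref_exit(&f->ref);
        kfree(f);
    }

    static void foo_release(struct foo *f)
    {
        /*
         * Calling percpu_ref_exit() at this point, before the grace
         * period has elapsed, is the ordering the patch removes: a
         * racing reader could still hit the freed percpu data.
         */
        call_rcu(&f->rcu, foo_free_rcu);
    }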

block/blk-iocost.c

@@ -232,7 +232,9 @@ enum {
/* 1/64k is granular enough and can easily be handled w/ u32 */
WEIGHT_ONE = 1 << 16,
};
enum {
/*
* As vtime is used to calculate the cost of each IO, it needs to
* be fairly high precision. For example, it should be able to
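
The gcc 13 wrinkle behind the iocost change, briefly: once one enumerator needs more than an int (the vtime constants defined in the enum opened by this hunk are built from shifts such as 1LLU << 37), the enum gets a 64-bit underlying type, and with gcc 13 every member inherits that type, which then trips printk format warnings and similar elsewhere. Moving the oversized constants into their own enum keeps the small ones int-sized. A rough sketch of the before/after shape, with illustrative names rather than the real iocost constants:

    /*
     * Before: one anonymous enum. The 64-bit constant widens the whole
     * enum, so with gcc 13 even SMALL_FLAG becomes 64-bit and a
     * printk("%u", SMALL_FLAG) elsewhere starts to warn.
     */
    enum {
        SMALL_FLAG = 1 << 16,
        HUGE_CONST = 1LLU << 37,
    };

    /*
     * After: the large values live in a separate enum, so the small
     * ones keep their int-sized type.
     */
    enum {
        SMALL_FLAG_FIXED = 1 << 16,
    };

    enum {
        HUGE_CONST_FIXED = 1LLU << 37,
    };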

drivers/block/loop.c

@@ -1773,7 +1773,16 @@ static const struct block_device_operations lo_fops = {
/*
* And now the modules code and kernel interface.
*/
static int max_loop;
/*
* If max_loop is specified, create that many devices upfront.
* This also becomes a hard limit. If max_loop is not specified,
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
* init time. Loop devices can be requested on-demand with the
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444);
@@ -2181,7 +2190,7 @@ MODULE_ALIAS("devname:loop-control");
static int __init loop_init(void)
{
int i, nr;
int i;
int err;
part_shift = 0;
@@ -2209,19 +2218,6 @@ static int __init loop_init(void)
goto err_out;
}
/*
* If max_loop is specified, create that many devices upfront.
* This also becomes a hard limit. If max_loop is not specified,
* create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
* init time. Loop devices can be requested on-demand with the
* /dev/loop-control interface, or be instantiated by accessing
* a 'dead' device node.
*/
if (max_loop)
nr = max_loop;
else
nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
err = misc_register(&loop_misc);
if (err < 0)
goto err_out;
@@ -2233,7 +2229,7 @@ static int __init loop_init(void)
}
/* pre-create number of devices given by config or max_loop */
for (i = 0; i < nr; i++)
for (i = 0; i < max_loop; i++)
loop_add(i);
printk(KERN_INFO "loop: module loaded\n");
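
For clarity on the user-visible effect of the loop change: max_loop now defaults to CONFIG_BLK_DEV_LOOP_MIN_COUNT instead of 0, so an explicit max_loop=0 (for example loop.max_loop=0 on the kernel command line, or modprobe loop max_loop=0 in the modular case) is no longer silently promoted to the minimum count and genuinely pre-creates no devices, while leaving the parameter unset behaves as before.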

include/linux/bio.h

@@ -782,8 +782,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
static inline void bio_clear_polled(struct bio *bio)
{
/* can't support alloc cache if we turn off polling */
bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
bio->bi_opf &= ~REQ_POLLED;
}
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,