block-5.15-2021-10-17
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmFsIqAQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppbBEACDewLUv7bg1VFIGdroRN51OGiOv1oV+8HP
ruY7O9CPtV7wcb3lA1Zy9igICuzuC5culHjbRJrNIUeTdWCQHCFk/sfKSD6VGMoT
cFTqpKxV7M3vYr9G2m5TFWgY2mfS+I5fxyDZxK2z2esHCFw6TZ7A5W13xScVXKP+
QdNFSlTrGkpggsSIEeHApG+NLsIecnkT4qzm8zPfUodUtQ3A8JMjQjnYUFEAWfWv
l9x9zDIzaGjPtXf5soFEvmdh1ALh3WWiYb1kIwK1FeP/PYX0JV/3zCMgqOwpK+4b
69OM3Q0NPHvu2TgSRK+ghekAtz5qgPDMCrzdhSgLYJEL/PGAOboqjrB9E+wWoEjd
IKrYLx4Xao2TUZLJF2y34hHfODGdasx7d+wS191UpVFEZHFhDhIaazZ2rDd5xnQK
LdzQw1JQF/igJovHauhSkGFIdJWBSDneLQoMimBnitZlsWARUmFSZej34FFRLZsW
8ZXfqipn/x+fh4sQ/HdEfWxnGHtveDpU+0Ka5bMUe/tJ9RPtmn/Ye7nFjYecC6NY
4UzFSNn+4e9DpHaDuP3I/eA1YBmVlcB5Hum3ve7X6ovwpjArYg3dgJOEi8uCZjfb
hdMANmkVptcPiEO9njEHhC7S8+Nm3t+8o3qQceN81j6Vcjgzt/Y/n3Z6UkKeSlkn
Ila+cZI1oA==
=J/e4
-----END PGP SIGNATURE-----

Merge tag 'block-5.15-2021-10-17' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Bigger than usual for this point in time, the majority is fixing some
  issues around BDI lifetimes with the move from the request_queue to
  the disk in this release. In detail:

   - Series on draining fs IO for del_gendisk() (Christoph)

   - NVMe pull request via Christoph:
       - fix the abort command id (Keith Busch)
       - nvme: fix per-namespace chardev deletion (Adam Manzanares)

   - brd locking scope fix (Tetsuo)

   - BFQ fix (Paolo)"

* tag 'block-5.15-2021-10-17' of git://git.kernel.dk/linux-block:
  block, bfq: reset last_bfqq_created on group change
  block: warn when putting the final reference on a registered disk
  brd: reduce the brd_devices_mutex scope
  kyber: avoid q->disk dereferences in trace points
  block: keep q_usage_counter in atomic mode after del_gendisk
  block: drain file system I/O on del_gendisk
  block: split bio_queue_enter from blk_queue_enter
  block: factor out a blk_try_enter_queue helper
  block: call submit_bio_checks under q_usage_counter
  nvme: fix per-namespace chardev deletion
  block/rnbd-clt-sysfs: fix a couple uninitialized variable bugs
  nvme-pci: Fix abort command id
commit f2b3420b92
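The series pulled in above centers on one idea: every file-system I/O submitter must pass a cheap "try enter" gate on q->q_usage_counter, and del_gendisk() marks the disk dead, blocks new entries, and waits for the counter to drain before tearing the queue down. The userspace sketch below models just that enter/drain handshake with plain C11 atomics; every name in it (fake_disk, try_enter, drain_and_delete) is hypothetical, and it spins instead of sleeping on mq_freeze_wq, so it illustrates the pattern rather than the kernel implementation.

/*
 * Minimal userspace sketch (not kernel code) of the enter/drain pattern
 * this pull request applies to del_gendisk().  Hypothetical names only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_disk {
	atomic_int inflight;	/* stands in for q->q_usage_counter */
	atomic_bool dead;	/* stands in for GD_DEAD / QUEUE_FLAG_DYING */
};

/* Modelled after blk_try_enter_queue()/bio_queue_enter(): refuse new I/O
 * once the disk is marked dead, otherwise account it as in flight. */
static bool try_enter(struct fake_disk *d)
{
	if (atomic_load(&d->dead))
		return false;
	atomic_fetch_add(&d->inflight, 1);
	/* re-check: teardown may have raced with us */
	if (atomic_load(&d->dead)) {
		atomic_fetch_sub(&d->inflight, 1);
		return false;
	}
	return true;
}

static void exit_io(struct fake_disk *d)
{
	atomic_fetch_sub(&d->inflight, 1);
}

/* Modelled after del_gendisk(): fail new I/O, then drain what is left. */
static void drain_and_delete(struct fake_disk *d)
{
	atomic_store(&d->dead, true);
	while (atomic_load(&d->inflight) > 0)
		;	/* the kernel sleeps on mq_freeze_wq instead of spinning */
	printf("drained, safe to tear down\n");
}

int main(void)
{
	struct fake_disk d;

	atomic_init(&d.inflight, 0);
	atomic_init(&d.dead, false);

	if (try_enter(&d))	/* a submitter gets in before teardown */
		exit_io(&d);

	drain_and_delete(&d);

	if (!try_enter(&d))	/* new I/O after "del_gendisk" is refused */
		printf("enter refused after delete\n");
	return 0;
}

The same ordering shows up in the del_gendisk() hunk further down: set GD_DEAD, start the drain, wait for the freeze, and only then unfreeze the counter in atomic mode.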
block/bfq-cgroup.c

@@ -666,6 +666,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
 	bfqg_and_blkg_put(bfqq_group(bfqq));
 
+	if (entity->parent &&
+	    entity->parent->last_bfqq_created == bfqq)
+		entity->parent->last_bfqq_created = NULL;
+	else if (bfqd->last_bfqq_created == bfqq)
+		bfqd->last_bfqq_created = NULL;
+
 	entity->parent = bfqg->my_entity;
 	entity->sched_data = &bfqg->sched_data;
 	/* pin down bfqg and its associated blkg */

block/blk-core.c (148 lines changed)
@@ -49,7 +49,6 @@
 #include "blk-mq.h"
 #include "blk-mq-sched.h"
 #include "blk-pm.h"
-#include "blk-rq-qos.h"
 
 struct dentry *blk_debugfs_root;
 
@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_set_queue_dying(struct request_queue *q)
+void blk_queue_start_drain(struct request_queue *q)
 {
-	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
-
 	/*
 	 * When queue DYING flag is set, we need to block new req
 	 * entering queue, so we call blk_freeze_queue_start() to
 	 * prevent I/O from crossing blk_queue_enter().
 	 */
 	blk_freeze_queue_start(q);
-
 	if (queue_is_mq(q))
 		blk_mq_wake_waiters(q);
-
 	/* Make blk_queue_enter() reexamine the DYING flag. */
 	wake_up_all(&q->mq_freeze_wq);
 }
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+	blk_queue_start_drain(q);
+}
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
 /**
@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 
-	rq_qos_exit(q);
-
 	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
-	/* for synchronous bio-based driver finish in-flight integrity i/o */
-	blk_flush_integrity();
-
 	blk_sync_queue(q);
 	if (queue_is_mq(q))
 		blk_mq_exit_queue(q);
@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
+static bool blk_try_enter_queue(struct request_queue *q, bool pm)
+{
+	rcu_read_lock();
+	if (!percpu_ref_tryget_live(&q->q_usage_counter))
+		goto fail;
+
+	/*
+	 * The code that increments the pm_only counter must ensure that the
+	 * counter is globally visible before the queue is unfrozen.
+	 */
+	if (blk_queue_pm_only(q) &&
+	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
+		goto fail_put;
+
+	rcu_read_unlock();
+	return true;
+
+fail_put:
+	percpu_ref_put(&q->q_usage_counter);
+fail:
+	rcu_read_unlock();
+	return false;
+}
+
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
@@ -425,40 +445,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
 	const bool pm = flags & BLK_MQ_REQ_PM;
 
-	while (true) {
-		bool success = false;
-
-		rcu_read_lock();
-		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
-			/*
-			 * The code that increments the pm_only counter is
-			 * responsible for ensuring that that counter is
-			 * globally visible before the queue is unfrozen.
-			 */
-			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
-			    !blk_queue_pm_only(q)) {
-				success = true;
-			} else {
-				percpu_ref_put(&q->q_usage_counter);
-			}
-		}
-		rcu_read_unlock();
-
-		if (success)
-			return 0;
-
+	while (!blk_try_enter_queue(q, pm)) {
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
-		 * read pair of barrier in blk_freeze_queue_start(),
-		 * we need to order reading __PERCPU_REF_DEAD flag of
-		 * .q_usage_counter and reading .mq_freeze_depth or
-		 * queue dying flag, otherwise the following wait may
-		 * never return if the two reads are reordered.
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
 		 */
 		smp_rmb();
 
 		wait_event(q->mq_freeze_wq,
 			   (!q->mq_freeze_depth &&
 			    blk_pm_resume_queue(pm, q)) ||
@@ -466,23 +464,43 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		if (blk_queue_dying(q))
 			return -ENODEV;
 	}
 
 	return 0;
 }
 
 static inline int bio_queue_enter(struct bio *bio)
 {
-	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-	bool nowait = bio->bi_opf & REQ_NOWAIT;
-	int ret;
+	struct gendisk *disk = bio->bi_bdev->bd_disk;
+	struct request_queue *q = disk->queue;
 
-	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-	if (unlikely(ret)) {
-		if (nowait && !blk_queue_dying(q))
+	while (!blk_try_enter_queue(q, false)) {
+		if (bio->bi_opf & REQ_NOWAIT) {
+			if (test_bit(GD_DEAD, &disk->state))
+				goto dead;
 			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
+			return -EBUSY;
+		}
+
+		/*
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
+		 */
+		smp_rmb();
+		wait_event(q->mq_freeze_wq,
+			   (!q->mq_freeze_depth &&
+			    blk_pm_resume_queue(false, q)) ||
+			   test_bit(GD_DEAD, &disk->state));
+		if (test_bit(GD_DEAD, &disk->state))
+			goto dead;
 	}
 
-	return ret;
+	return 0;
+dead:
+	bio_io_error(bio);
+	return -ENODEV;
 }
 
 void blk_queue_exit(struct request_queue *q)
@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
 	struct gendisk *disk = bio->bi_bdev->bd_disk;
 	blk_qc_t ret = BLK_QC_T_NONE;
 
-	if (blk_crypto_bio_prep(&bio)) {
-		if (!disk->fops->submit_bio)
-			return blk_mq_submit_bio(bio);
+	if (unlikely(bio_queue_enter(bio) != 0))
+		return BLK_QC_T_NONE;
+
+	if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
+		goto queue_exit;
+	if (disk->fops->submit_bio) {
 		ret = disk->fops->submit_bio(bio);
+		goto queue_exit;
 	}
+	return blk_mq_submit_bio(bio);
+
+queue_exit:
 	blk_queue_exit(disk->queue);
 	return ret;
 }
@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
 		struct bio_list lower, same;
 
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-
 		/*
 		 * Create a fresh bio_list for all subordinate requests.
 		 */
@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
 static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
 {
 	struct bio_list bio_list[2] = { };
-	blk_qc_t ret = BLK_QC_T_NONE;
+	blk_qc_t ret;
 
 	current->bio_list = bio_list;
 
 	do {
-		struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-		if (unlikely(bio_queue_enter(bio) != 0))
-			continue;
-
-		if (!blk_crypto_bio_prep(&bio)) {
-			blk_queue_exit(disk->queue);
-			ret = BLK_QC_T_NONE;
-			continue;
-		}
-
-		ret = blk_mq_submit_bio(bio);
+		ret = __submit_bio(bio);
 	} while ((bio = bio_list_pop(&bio_list[0])));
 
 	current->bio_list = NULL;
@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
  */
 blk_qc_t submit_bio_noacct(struct bio *bio)
 {
-	if (!submit_bio_checks(bio))
-		return BLK_QC_T_NONE;
-
 	/*
 	 * We only want one ->submit_bio to be active at a time, else stack
 	 * usage with stacked devices could be a problem. Use current->bio_list
block/blk-mq.c

@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
-void blk_mq_unfreeze_queue(struct request_queue *q)
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
 	mutex_lock(&q->mq_freeze_lock);
+	if (force_atomic)
+		q->q_usage_counter.data->force_atomic = true;
 	q->mq_freeze_depth--;
 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
 	if (!q->mq_freeze_depth) {
@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 	}
 	mutex_unlock(&q->mq_freeze_lock);
 }
+
+void blk_mq_unfreeze_queue(struct request_queue *q)
+{
+	__blk_mq_unfreeze_queue(q, false);
+}
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
 /*
block/blk.h

@@ -51,6 +51,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 void blk_free_flush_queue(struct blk_flush_queue *q);
 
 void blk_freeze_queue(struct request_queue *q);
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
+void blk_queue_start_drain(struct request_queue *q);
 
 #define BIO_INLINE_VECS 4
 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
block/genhd.c

@@ -26,6 +26,7 @@
 #include <linux/badblocks.h>
 
 #include "blk.h"
+#include "blk-rq-qos.h"
 
 static struct kobject *block_depr;
 
@@ -559,6 +560,8 @@ EXPORT_SYMBOL(device_add_disk);
  */
 void del_gendisk(struct gendisk *disk)
 {
+	struct request_queue *q = disk->queue;
+
 	might_sleep();
 
 	if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
@@ -575,8 +578,27 @@ void del_gendisk(struct gendisk *disk)
 	fsync_bdev(disk->part0);
 	__invalidate_device(disk->part0, true);
 
+	/*
+	 * Fail any new I/O.
+	 */
+	set_bit(GD_DEAD, &disk->state);
 	set_capacity(disk, 0);
 
+	/*
+	 * Prevent new I/O from crossing bio_queue_enter().
+	 */
+	blk_queue_start_drain(q);
+	blk_mq_freeze_queue_wait(q);
+
+	rq_qos_exit(q);
+	blk_sync_queue(q);
+	blk_flush_integrity();
+	/*
+	 * Allow using passthrough request again after the queue is torn down.
+	 */
+	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
+	__blk_mq_unfreeze_queue(q, true);
+
 	if (!(disk->flags & GENHD_FL_HIDDEN)) {
 		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
 
@@ -1056,6 +1078,7 @@ static void disk_release(struct device *dev)
 	struct gendisk *disk = dev_to_disk(dev);
 
 	might_sleep();
+	WARN_ON_ONCE(disk_live(disk));
 
 	disk_release_events(disk);
 	kfree(disk->random);
block/kyber-iosched.c

@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
 
 struct kyber_queue_data {
 	struct request_queue *q;
+	dev_t dev;
 
 	/*
 	 * Each scheduling domain has a limited number of in-flight requests
@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
 	}
 	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
 
-	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+	trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
 			    kyber_latency_type_names[type], percentile,
 			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
 
@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
 	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
 	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
 		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
-		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+		trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
 				   depth);
 	}
 }
@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		goto err;
 
 	kqd->q = q;
+	kqd->dev = disk_devt(q->disk);
 
 	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
 					    GFP_KERNEL | __GFP_ZERO);
@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
 			list_del_init(&rq->queuelist);
 			return rq;
 		} else {
-			trace_kyber_throttled(kqd->q,
+			trace_kyber_throttled(kqd->dev,
 					      kyber_domain_names[khd->cur_domain]);
 		}
 	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
 			list_del_init(&rq->queuelist);
 			return rq;
 		} else {
-			trace_kyber_throttled(kqd->q,
+			trace_kyber_throttled(kqd->dev,
 					      kyber_domain_names[khd->cur_domain]);
 		}
 	}
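The kyber hunks above stop the trace points from chasing q->disk at trace time and instead cache the dev_t once in kyber_queue_data_alloc(), so a disk that has already gone through del_gendisk() is never dereferenced from a trace event. Below is a small, hedged userspace sketch of that "cache the identifier at init" idea; the struct and function names are made up for illustration and are not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct disk { uint32_t devt; };
struct queue { struct disk *disk; };	/* disk may be cleared on teardown */

struct sched_data {
	struct queue *q;
	uint32_t dev;	/* cached at init, like kqd->dev = disk_devt(q->disk) */
};

static void sched_init(struct sched_data *sd, struct queue *q)
{
	sd->q = q;
	sd->dev = q->disk->devt;	/* the only place q->disk is dereferenced */
}

/* Trace-style helper: uses the cached value, so it stays safe even after
 * the queue's disk pointer has been torn down. */
static void trace_throttled(const struct sched_data *sd, const char *domain)
{
	printf("dev %u throttled in domain %s\n", (unsigned)sd->dev, domain);
}

int main(void)
{
	struct disk d = { .devt = 0x800010 };
	struct queue q = { .disk = &d };
	struct sched_data sd;

	sched_init(&sd, &q);
	q.disk = NULL;			/* simulate del_gendisk() clearing it */
	trace_throttled(&sd, "read");	/* still fine: no q->disk access */
	return 0;
}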
drivers/block/brd.c

@@ -373,10 +373,22 @@ static int brd_alloc(int i)
 	struct gendisk *disk;
 	char buf[DISK_NAME_LEN];
 
+	mutex_lock(&brd_devices_mutex);
+	list_for_each_entry(brd, &brd_devices, brd_list) {
+		if (brd->brd_number == i) {
+			mutex_unlock(&brd_devices_mutex);
+			return -EEXIST;
+		}
+	}
 	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
-	if (!brd)
+	if (!brd) {
+		mutex_unlock(&brd_devices_mutex);
 		return -ENOMEM;
+	}
 	brd->brd_number		= i;
+	list_add_tail(&brd->brd_list, &brd_devices);
+	mutex_unlock(&brd_devices_mutex);
+
 	spin_lock_init(&brd->brd_lock);
 	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
 
@@ -411,37 +423,30 @@ static int brd_alloc(int i)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	add_disk(disk);
-	list_add_tail(&brd->brd_list, &brd_devices);
 
 	return 0;
 
 out_free_dev:
+	mutex_lock(&brd_devices_mutex);
+	list_del(&brd->brd_list);
+	mutex_unlock(&brd_devices_mutex);
 	kfree(brd);
 	return -ENOMEM;
 }
 
 static void brd_probe(dev_t dev)
 {
-	int i = MINOR(dev) / max_part;
-	struct brd_device *brd;
-
-	mutex_lock(&brd_devices_mutex);
-	list_for_each_entry(brd, &brd_devices, brd_list) {
-		if (brd->brd_number == i)
-			goto out_unlock;
-	}
-
-	brd_alloc(i);
-out_unlock:
-	mutex_unlock(&brd_devices_mutex);
+	brd_alloc(MINOR(dev) / max_part);
 }
 
 static void brd_del_one(struct brd_device *brd)
 {
-	list_del(&brd->brd_list);
 	del_gendisk(brd->brd_disk);
 	blk_cleanup_disk(brd->brd_disk);
 	brd_free_pages(brd);
+	mutex_lock(&brd_devices_mutex);
+	list_del(&brd->brd_list);
+	mutex_unlock(&brd_devices_mutex);
 	kfree(brd);
 }
 
@@ -491,25 +496,21 @@ static int __init brd_init(void)
 
 	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
 
-	mutex_lock(&brd_devices_mutex);
 	for (i = 0; i < rd_nr; i++) {
 		err = brd_alloc(i);
 		if (err)
 			goto out_free;
 	}
 
-	mutex_unlock(&brd_devices_mutex);
-
 	pr_info("brd: module loaded\n");
 	return 0;
 
 out_free:
+	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 	debugfs_remove_recursive(brd_debugfs_dir);
 
 	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
 		brd_del_one(brd);
-	mutex_unlock(&brd_devices_mutex);
-	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 
 	pr_info("brd: module NOT loaded !!!\n");
 	return err;
@@ -519,13 +520,12 @@ static void __exit brd_exit(void)
 {
 	struct brd_device *brd, *next;
 
+	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
 	debugfs_remove_recursive(brd_debugfs_dir);
 
 	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
 		brd_del_one(brd);
 
-	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
-
 	pr_info("brd: module unloaded\n");
 }
 
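The brd hunks above narrow brd_devices_mutex to the list manipulation itself: brd_alloc() now does the duplicate check, the allocation and the list insertion under the lock and returns -EEXIST on a duplicate, while brd_probe() simply calls brd_alloc(). The sketch below is a rough userspace model of that narrowed lock scope, using a pthread mutex and invented names (ramdev, ramdev_alloc); it is not the driver code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical miniature of brd's device list handling. */
struct ramdev {
	int number;
	struct ramdev *next;
};

static struct ramdev *devices;
static pthread_mutex_t devices_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like the reworked brd_alloc(): the lock covers only the list walk and
 * the insertion, not the rest of device setup. */
static int ramdev_alloc(int number)
{
	struct ramdev *d, *it;

	pthread_mutex_lock(&devices_lock);
	for (it = devices; it; it = it->next) {
		if (it->number == number) {
			pthread_mutex_unlock(&devices_lock);
			return -EEXIST;
		}
	}
	d = calloc(1, sizeof(*d));
	if (!d) {
		pthread_mutex_unlock(&devices_lock);
		return -ENOMEM;
	}
	d->number = number;
	d->next = devices;
	devices = d;
	pthread_mutex_unlock(&devices_lock);

	/* ...the (possibly slow) rest of setup happens unlocked... */
	return 0;
}

int main(void)
{
	printf("first alloc: %d\n", ramdev_alloc(0));	/* 0 */
	printf("duplicate:   %d\n", ramdev_alloc(0));	/* -EEXIST */
	return 0;
}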
drivers/block/rnbd/rnbd-clt-sysfs.c

@@ -71,8 +71,10 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
 	int opt_mask = 0;
 	int token;
 	int ret = -EINVAL;
-	int i, dest_port, nr_poll_queues;
+	int nr_poll_queues = 0;
+	int dest_port = 0;
 	int p_cnt = 0;
+	int i;
 
 	options = kstrdup(buf, GFP_KERNEL);
 	if (!options)
drivers/nvme/host/core.c

@@ -3550,10 +3550,15 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
 	return 0;
 }
 
+static void nvme_cdev_rel(struct device *dev)
+{
+	ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
+}
+
 void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
 {
 	cdev_device_del(cdev, cdev_device);
-	ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt));
+	put_device(cdev_device);
 }
 
 int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
@@ -3566,14 +3571,14 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
 		return minor;
 	cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
 	cdev_device->class = nvme_ns_chr_class;
+	cdev_device->release = nvme_cdev_rel;
 	device_initialize(cdev_device);
 	cdev_init(cdev, fops);
 	cdev->owner = owner;
 	ret = cdev_device_add(cdev, cdev_device);
-	if (ret) {
+	if (ret)
 		put_device(cdev_device);
-		ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
-	}
+
 	return ret;
 }
 
@@ -3605,11 +3610,9 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
 			   ns->ctrl->instance, ns->head->instance);
 	if (ret)
 		return ret;
-	ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
-			    ns->ctrl->ops->module);
-	if (ret)
-		kfree_const(ns->cdev_device.kobj.name);
-	return ret;
+
+	return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
+			     ns->ctrl->ops->module);
 }
 
 static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
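The nvme/host/core.c hunks above move the minor-number release out of nvme_cdev_del() and into a ->release callback (nvme_cdev_rel), so the ida entry is only returned once the last reference to the device is dropped rather than at unregistration time. Below is a hedged, minimal userspace sketch of that "free owned resources in the release callback" pattern; the struct dev, dev_get/dev_put and cdev_release names are hypothetical, and a plain integer stands in for the kernel's refcounting.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical miniature of the "free resources in ->release" pattern. */
struct dev {
	int refs;
	int minor;			/* resource owned by the object */
	void (*release)(struct dev *);	/* runs when the last ref drops */
};

static void dev_get(struct dev *d) { d->refs++; }

static void dev_put(struct dev *d)
{
	if (--d->refs == 0)
		d->release(d);
}

static void cdev_release(struct dev *d)
{
	printf("returning minor %d to the allocator\n", d->minor);
	free(d);
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d));

	d->refs = 1;		/* reference held by the registration */
	d->minor = 5;
	d->release = cdev_release;

	dev_get(d);		/* an open file still holds a reference */
	dev_put(d);		/* "cdev_device_del + put_device": unregister */
	dev_put(d);		/* last user closes: only now is the minor freed */
	return 0;
}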
drivers/nvme/host/multipath.c

@@ -431,8 +431,6 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
 		return ret;
 	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
 			    &nvme_ns_head_chr_fops, THIS_MODULE);
-	if (ret)
-		kfree_const(head->cdev_device.kobj.name);
 	return ret;
 }
 
|
@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
|
||||
iod->aborted = 1;
|
||||
|
||||
cmd.abort.opcode = nvme_admin_abort_cmd;
|
||||
cmd.abort.cid = req->tag;
|
||||
cmd.abort.cid = nvme_cid(req);
|
||||
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
|
||||
|
||||
dev_warn(nvmeq->dev->ctrl.device,
|
||||
|
include/linux/genhd.h

@@ -149,6 +149,7 @@ struct gendisk {
 	unsigned long state;
 #define GD_NEED_PART_SCAN		0
 #define GD_READ_ONLY			1
+#define GD_DEAD				2
 
 	struct mutex open_mutex;	/* open/close mutex */
 	unsigned open_partitions;	/* number of open partitions */
include/trace/events/kyber.h

@@ -13,11 +13,11 @@
 
 TRACE_EVENT(kyber_latency,
 
-	TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+	TP_PROTO(dev_t dev, const char *domain, const char *type,
 		 unsigned int percentile, unsigned int numerator,
 		 unsigned int denominator, unsigned int samples),
 
-	TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+	TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
 	),
 
 	TP_fast_assign(
-		__entry->dev = disk_devt(q->disk);
+		__entry->dev = dev;
 		strlcpy(__entry->domain, domain, sizeof(__entry->domain));
 		strlcpy(__entry->type, type, sizeof(__entry->type));
 		__entry->percentile = percentile;
@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
 
 TRACE_EVENT(kyber_adjust,
 
-	TP_PROTO(struct request_queue *q, const char *domain,
-		 unsigned int depth),
+	TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
 
-	TP_ARGS(q, domain, depth),
+	TP_ARGS(dev, domain, depth),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
 	),
 
 	TP_fast_assign(
-		__entry->dev = disk_devt(q->disk);
+		__entry->dev = dev;
 		strlcpy(__entry->domain, domain, sizeof(__entry->domain));
 		__entry->depth = depth;
 	),
@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
 
 TRACE_EVENT(kyber_throttled,
 
-	TP_PROTO(struct request_queue *q, const char *domain),
+	TP_PROTO(dev_t dev, const char *domain),
 
-	TP_ARGS(q, domain),
+	TP_ARGS(dev, domain),
 
 	TP_STRUCT__entry(
 		__field( dev_t, dev )
@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
 	),
 
 	TP_fast_assign(
-		__entry->dev = disk_devt(q->disk);
+		__entry->dev = dev;
 		strlcpy(__entry->domain, domain, sizeof(__entry->domain));
 	),
 