From f6b6a28e2dbc401416ff12f775d75281c9b41918 Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Fri, 29 Jul 2016 16:15:18 -0300 Subject: [PATCH 01/18] nvme: Prevent controller state invalid transition Acquiring the nvme_ctrl lock before reading ctrl->state in nvme_change_ctrl_state() should prevent a theoretical invalid state transition, in the event of two threads racing inside that function. I haven't been able to observe this happening with the current code, and the current state machine seems to be simple enough to not be affected by these invalid transitions, but future modifications could make it more likely to happen. Signed-off-by: Gabriel Krisman Bertazi Reviewed-by: Sagi Grimberg Reviewed-by: Steve Wise Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7ff2e820bbf4..7f75d661237f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -81,10 +81,12 @@ EXPORT_SYMBOL_GPL(nvme_cancel_request); bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, enum nvme_ctrl_state new_state) { - enum nvme_ctrl_state old_state = ctrl->state; + enum nvme_ctrl_state old_state; bool changed = false; spin_lock_irq(&ctrl->lock); + + old_state = ctrl->state; switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { @@ -140,11 +142,12 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, default: break; } - spin_unlock_irq(&ctrl->lock); if (changed) ctrl->state = new_state; + spin_unlock_irq(&ctrl->lock); + return changed; } EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); From 7afafc8a44bf0ab841b17d450b02aedb3a138985 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 16 Aug 2016 10:59:35 +0300 Subject: [PATCH 02/18] block: Fix secure erase Commit 288dab8a35a0 ("block: add a separate operation type for secure erase") split REQ_OP_SECURE_ERASE from REQ_OP_DISCARD without considering all the places REQ_OP_DISCARD was being used to mean either. Fix those. 
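Every call site touched by this patch repeats the same two-op test, whether the operation is a discard or a secure erase. A hypothetical helper (not part of this patch; bio_op() and the REQ_OP_* values are the kernel's own) would capture that test in one place and make future call sites harder to miss:

static inline bool bio_is_discard_like(struct bio *bio)
{
	/* Secure erase is a discard variant with the same payload
	 * handling, so every path that special-cases REQ_OP_DISCARD
	 * must also accept REQ_OP_SECURE_ERASE. */
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE;
}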
Signed-off-by: Adrian Hunter Fixes: 288dab8a35a0 ("block: add a separate operation type for secure erase") Signed-off-by: Jens Axboe --- block/bio.c | 21 +++++++++++---------- block/blk-merge.c | 33 +++++++++++++++++++-------------- block/elevator.c | 2 +- drivers/mmc/card/block.c | 1 + drivers/mmc/card/queue.c | 3 ++- drivers/mmc/card/queue.h | 4 +++- include/linux/bio.h | 10 ++++++++-- include/linux/blkdev.h | 6 ++++-- kernel/trace/blktrace.c | 2 +- 9 files changed, 50 insertions(+), 32 deletions(-) diff --git a/block/bio.c b/block/bio.c index f39477538fef..aa7354088008 100644 --- a/block/bio.c +++ b/block/bio.c @@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; - if (bio_op(bio) == REQ_OP_DISCARD) - goto integrity_clone; - - if (bio_op(bio) == REQ_OP_WRITE_SAME) { + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + break; + case REQ_OP_WRITE_SAME: bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; - goto integrity_clone; + break; + default: + bio_for_each_segment(bv, bio_src, iter) + bio->bi_io_vec[bio->bi_vcnt++] = bv; + break; } - bio_for_each_segment(bv, bio_src, iter) - bio->bi_io_vec[bio->bi_vcnt++] = bv; - -integrity_clone: if (bio_integrity(bio_src)) { int ret; @@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors, * Discards need a mutable bio_vec to accommodate the payload * required by the DSM TRIM and UNMAP commands. */ - if (bio_op(bio) == REQ_OP_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE) split = bio_clone_bioset(bio, gfp, bs); else split = bio_clone_fast(bio, gfp, bs); diff --git a/block/blk-merge.c b/block/blk-merge.c index 3eec75a9e91d..72627e3cf91e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -172,12 +172,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio, struct bio *split, *res; unsigned nsegs; - if (bio_op(*bio) == REQ_OP_DISCARD) + switch (bio_op(*bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: split = blk_bio_discard_split(q, *bio, bs, &nsegs); - else if (bio_op(*bio) == REQ_OP_WRITE_SAME) + break; + case REQ_OP_WRITE_SAME: split = blk_bio_write_same_split(q, *bio, bs, &nsegs); - else + break; + default: split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs); + break; + } /* physical segments can be figured out during splitting */ res = split ? split : *bio; @@ -213,7 +219,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, * This should probably be returning 0, but blk_add_request_payload() * (Christoph!!!!) */ - if (bio_op(bio) == REQ_OP_DISCARD) + if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE) return 1; if (bio_op(bio) == REQ_OP_WRITE_SAME) @@ -385,7 +391,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, nsegs = 0; cluster = blk_queue_cluster(q); - if (bio_op(bio) == REQ_OP_DISCARD) { + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: /* * This is a hack - drivers should be neither modifying the * biovec, nor relying on bi_vcnt - but because of @@ -393,19 +401,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, * a payload we need to set up here (thank you Christoph) and * bi_vcnt is really the only way of telling if we need to. 
*/ - - if (bio->bi_vcnt) - goto single_segment; - - return 0; - } - - if (bio_op(bio) == REQ_OP_WRITE_SAME) { -single_segment: + if (!bio->bi_vcnt) + return 0; + /* Fall through */ + case REQ_OP_WRITE_SAME: *sg = sglist; bvec = bio_iovec(bio); sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); return 1; + default: + break; } for_each_bio(bio) diff --git a/block/elevator.c b/block/elevator.c index 7096c22041e7..f7d973a56fd7 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) list_for_each_prev(entry, &q->queue_head) { struct request *pos = list_entry_rq(entry); - if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD)) + if (req_op(rq) != req_op(pos)) break; if (rq_data_dir(rq) != rq_data_dir(pos)) break; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 48a5dd740f3b..82503e6f04b3 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1726,6 +1726,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) break; if (req_op(next) == REQ_OP_DISCARD || + req_op(next) == REQ_OP_SECURE_ERASE || req_op(next) == REQ_OP_FLUSH) break; diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index bf14642a576a..29578e98603d 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -33,7 +33,8 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) /* * We only like normal block requests and discards. */ - if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD) { + if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD && + req_op(req) != REQ_OP_SECURE_ERASE) { blk_dump_rq_flags(req, "MMC bad request"); return BLKPREP_KILL; } diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index d62531124d54..fee5e1271465 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -4,7 +4,9 @@ static inline bool mmc_req_is_special(struct request *req) { return req && - (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD); + (req_op(req) == REQ_OP_FLUSH || + req_op(req) == REQ_OP_DISCARD || + req_op(req) == REQ_OP_SECURE_ERASE); } struct request; diff --git a/include/linux/bio.h b/include/linux/bio.h index 59ffaa68b11b..23ddf4b46a9b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio) { if (bio && bio->bi_iter.bi_size && - bio_op(bio) != REQ_OP_DISCARD) + bio_op(bio) != REQ_OP_DISCARD && + bio_op(bio) != REQ_OP_SECURE_ERASE) return true; return false; @@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio) static inline bool bio_no_advance_iter(struct bio *bio) { - return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME; + return bio_op(bio) == REQ_OP_DISCARD || + bio_op(bio) == REQ_OP_SECURE_ERASE || + bio_op(bio) == REQ_OP_WRITE_SAME; } static inline bool bio_is_rw(struct bio *bio) @@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio) if (bio_op(bio) == REQ_OP_DISCARD) return 1; + if (bio_op(bio) == REQ_OP_SECURE_ERASE) + return 1; + if (bio_op(bio) == REQ_OP_WRITE_SAME) return 1; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2c210b6a7bcf..e79055c8b577 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { - if 
(unlikely(op == REQ_OP_DISCARD)) + if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)) return min(q->limits.max_discard_sectors, UINT_MAX >> 9); if (unlikely(op == REQ_OP_WRITE_SAME)) @@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq, if (unlikely(rq->cmd_type != REQ_TYPE_FS)) return q->limits.max_hw_sectors; - if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD)) + if (!q->limits.chunk_sectors || + req_op(rq) == REQ_OP_DISCARD || + req_op(rq) == REQ_OP_SECURE_ERASE) return blk_queue_get_max_sectors(q, req_op(rq)); return min(blk_max_size_offset(q, offset), diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7598e6ca817a..dbafc5df03f3 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -223,7 +223,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, what |= MASK_TC_BIT(op_flags, META); what |= MASK_TC_BIT(op_flags, PREFLUSH); what |= MASK_TC_BIT(op_flags, FUA); - if (op == REQ_OP_DISCARD) + if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE) what |= BLK_TC_ACT(BLK_TC_DISCARD); if (op == REQ_OP_FLUSH) what |= BLK_TC_ACT(BLK_TC_FLUSH); From 1b856086813be9371929b6cc62045f9fd470f5a0 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 16 Aug 2016 16:48:36 -0700 Subject: [PATCH 03/18] block: Fix race triggered by blk_set_queue_dying() blk_set_queue_dying() can be called while another thread is submitting I/O or changing queue flags, e.g. through dm_stop_queue(). Hence protect the QUEUE_FLAG_DYING flag change with locking. Signed-off-by: Bart Van Assche Cc: Christoph Hellwig Cc: Mike Snitzer Cc: stable Signed-off-by: Jens Axboe --- block/blk-core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c index 999442ec4601..36c7ac328d8c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end); void blk_set_queue_dying(struct request_queue *q) { - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + spin_lock_irq(q->queue_lock); + queue_flag_set(QUEUE_FLAG_DYING, q); + spin_unlock_irq(q->queue_lock); if (q->mq_ops) blk_mq_wake_waiters(q); From d9dc1702b297ec4a6bb9c0326a70641b322ba886 Mon Sep 17 00:00:00 2001 From: Eric Wheeler Date: Fri, 17 Jun 2016 15:01:54 -0700 Subject: [PATCH 04/18] bcache: register_bcache(): call blkdev_put() when cache_alloc() fails register_cache() is supposed to return an error string on error so that register_bcache() will call blkdev_put() and clean up other user counters, but it does not set 'char *err' when cache_alloc() fails (e.g., due to memory pressure) and thus register_bcache() performs no cleanup. register_bcache() <----------\ <- no jump to err_close, no blkdev_put() | | +->register_cache() | <- fails to set char *err | | +->cache_alloc() ---/ <- returns error This patch sets `char *err` for this failure case so that register_cache() will cause register_bcache() to correctly jump to err_close and do cleanup. This was tested under OOM conditions that triggered the bug.
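The contract described above, as a minimal sketch (hypothetical helper name, not the actual bcache code): a NULL error string means success, so any failure path inside the helper that leaves err unset silently skips the unwind in the caller:

	const char *err;

	err = register_cache_sketch(sb, sb_page, bdev, ca);
	if (err)
		goto err_close;		/* cleanup on failure */
	return;
err_close:
	pr_info("error registering cache device: %s", err);
	/* Drop the reference taken when the device was opened. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);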
Signed-off-by: Eric Wheeler Cc: Kent Overstreet Cc: stable@vger.kernel.org --- drivers/md/bcache/super.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 95a4ca6ce6ff..6ada14b9a157 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1844,7 +1844,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, struct block_device *bdev, struct cache *ca) { char name[BDEVNAME_SIZE]; - const char *err = NULL; + const char *err = NULL; /* must be set for any error case */ int ret = 0; memcpy(&ca->sb, sb, sizeof(struct cache_sb)); @@ -1861,8 +1861,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, ca->discard = CACHE_DISCARD(&ca->sb); ret = cache_alloc(ca); - if (ret != 0) + if (ret != 0) { + if (ret == -ENOMEM) + err = "cache_alloc(): -ENOMEM"; + else + err = "cache_alloc(): unknown error"; goto err; + } if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) { err = "error calling kobject_add"; From acc9cf8c66c66b2cbbdb4a375537edee72be64df Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 17 Aug 2016 18:21:24 -0700 Subject: [PATCH 05/18] bcache: RESERVE_PRIO is too small by one when prio_buckets() is a power of two. This patch fixes a cachedev registration-time allocation deadlock. This can deadlock on boot if your initrd auto-registers bcache devices: Allocator thread: [ 720.727614] INFO: task bcache_allocato:3833 blocked for more than 120 seconds. [ 720.732361] [] schedule+0x37/0x90 [ 720.732963] [] bch_bucket_alloc+0x188/0x360 [bcache] [ 720.733538] [] ? prepare_to_wait_event+0xf0/0xf0 [ 720.734137] [] bch_prio_write+0x19d/0x340 [bcache] [ 720.734715] [] bch_allocator_thread+0x3ff/0x470 [bcache] [ 720.735311] [] ? __schedule+0x2dc/0x950 [ 720.735884] [] ? invalidate_buckets+0x980/0x980 [bcache] Registration thread: [ 720.710403] INFO: task bash:3531 blocked for more than 120 seconds. [ 720.715226] [] schedule+0x37/0x90 [ 720.715805] [] __bch_btree_map_nodes+0x12d/0x150 [bcache] [ 720.716409] [] ? bch_btree_insert_check_key+0x1c0/0x1c0 [bcache] [ 720.717008] [] bch_btree_insert+0xf4/0x170 [bcache] [ 720.717586] [] ? prepare_to_wait_event+0xf0/0xf0 [ 720.718191] [] bch_journal_replay+0x14a/0x290 [bcache] [ 720.718766] [] ? ttwu_do_activate.constprop.94+0x5d/0x70 [ 720.719369] [] ? try_to_wake_up+0x1d4/0x350 [ 720.719968] [] run_cache_set+0x580/0x8e0 [bcache] [ 720.720553] [] register_bcache+0xe2e/0x13b0 [bcache] [ 720.721153] [] kobj_attr_store+0xf/0x20 [ 720.721730] [] sysfs_kf_write+0x3d/0x50 [ 720.722327] [] kernfs_fop_write+0x12a/0x180 [ 720.722904] [] __vfs_write+0x37/0x110 [ 720.723503] [] ? __sb_start_write+0x58/0x110 [ 720.724100] [] ? security_file_permission+0x23/0xa0 [ 720.724675] [] vfs_write+0xa9/0x1b0 [ 720.725275] [] ? do_audit_syscall_entry+0x6c/0x70 [ 720.725849] [] SyS_write+0x55/0xd0 [ 720.726451] [] ? do_page_fault+0x30/0x80 [ 720.727045] [] system_call_fastpath+0x12/0x71 The fifo code in upstream bcache can't use the last element in the buffer, which was the cause of the bug: if you asked for a power of two size, it'd give you a fifo that could hold one less than what you asked for rather than allocating a buffer twice as big.
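The off-by-one comes from the classic ring-buffer layout that tells full from empty by leaving one slot unused; a standalone illustration in plain C (not bcache's actual fifo macros):

struct ring {
	unsigned int front, back, size;	/* size is a power of two */
	int *data;
};

static bool ring_empty(const struct ring *r)
{
	return r->front == r->back;
}

static bool ring_full(const struct ring *r)
{
	/* One slot always stays unused so that full and empty remain
	 * distinguishable: usable capacity is size - 1, not size. */
	return ((r->back + 1) & (r->size - 1)) == r->front;
}

With that layout, asking init_fifo() for prio_buckets(ca) entries when that count is already a power of two yields a fifo whose usable capacity is one short of what was requested, hence the switch to init_fifo_exact() below.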
Signed-off-by: Kent Overstreet Tested-by: Eric Wheeler Cc: stable@vger.kernel.org --- drivers/md/bcache/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 6ada14b9a157..6b93e1b77767 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1820,7 +1820,7 @@ static int cache_alloc(struct cache *ca) free = roundup_pow_of_two(ca->sb.nbuckets) >> 10; if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) || - !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || + !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || From 90706094d5be614ae7285b3c96c3125bb198618c Mon Sep 17 00:00:00 2001 From: Eric Wheeler Date: Thu, 18 Aug 2016 20:15:26 -0700 Subject: [PATCH 06/18] bcache: pr_err: more meaningful error message when nr_stripes is invalid The original error was thought to be corruption, but was actually caused by: make-bcache --data-offset N where N was in bytes and should have been in sectors. While userspace tools should be updated to check that --data-offset is not beyond the end of the volume, hopefully this will help others that might not have noticed the units. Signed-off-by: Eric Wheeler Cc: Kent Overstreet --- drivers/md/bcache/super.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 6b93e1b77767..849ad441cd76 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -760,7 +760,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, if (!d->nr_stripes || d->nr_stripes > INT_MAX || d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) { - pr_err("nr_stripes too large"); + pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", + (unsigned)d->nr_stripes); return -ENOMEM; } From 6c647b0eb01cd7326dca093590f5e123e3c68b9c Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 1 Jul 2016 15:45:57 -0400 Subject: [PATCH 07/18] xen-blkfront: fix places not updated after introducing 64KB page granularity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two places didn't get updated when 64KB page granularity was introduced; this patch fixes them. Signed-off-by: Bob Liu Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkfront.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index be4fea6a5dd3..6a1756d72dcb 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1315,7 +1315,7 @@ free_shadow: rinfo->ring_ref[i] = GRANT_INVALID_REF; } } - free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE)); + free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE)); rinfo->ring.sring = NULL; if (rinfo->irq) @@ -2008,7 +2008,7 @@ static int blkif_recover(struct blkfront_info *info) blkfront_gather_backend_features(info); segs = info->max_indirect_segments ?
: BLKIF_MAX_SEGMENTS_PER_REQUEST; - blk_queue_max_segments(info->rq, segs); + blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); for (r_index = 0; r_index < info->nr_rings; r_index++) { struct blkfront_ring_info *rinfo = &info->rinfo[r_index]; From 172335ada40ce26806e514c83a504b45c14a4139 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Fri, 1 Jul 2016 17:43:39 -0400 Subject: [PATCH 08/18] xen-blkfront: introduce blkif_set_queue_limits() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit blk_mq_update_nr_hw_queues() resets all queue limits to their defaults, which is not what xen-blkfront expects; introduce blkif_set_queue_limits() to restore the limits to their correct initial values. Signed-off-by: Bob Liu Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkfront.c | 86 ++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 6a1756d72dcb..f84e220a26e6 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -189,6 +189,8 @@ struct blkfront_info struct mutex mutex; struct xenbus_device *xbdev; struct gendisk *gd; + u16 sector_size; + unsigned int physical_sector_size; int vdevice; blkif_vdev_t handle; enum blkif_state connected; @@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = { .map_queue = blk_mq_map_queue, }; +static void blkif_set_queue_limits(struct blkfront_info *info) +{ + struct request_queue *rq = info->rq; + struct gendisk *gd = info->gd; + unsigned int segments = info->max_indirect_segments ? : + BLKIF_MAX_SEGMENTS_PER_REQUEST; + + queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); + + if (info->feature_discard) { + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); + blk_queue_max_discard_sectors(rq, get_capacity(gd)); + rq->limits.discard_granularity = info->discard_granularity; + rq->limits.discard_alignment = info->discard_alignment; + if (info->feature_secdiscard) + queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); + } + + /* Hard sector size and max sectors impersonate the equiv. hardware. */ + blk_queue_logical_block_size(rq, info->sector_size); + blk_queue_physical_block_size(rq, info->physical_sector_size); + blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512); + + /* Each segment in a request is up to an aligned page in size. */ + blk_queue_segment_boundary(rq, PAGE_SIZE - 1); + blk_queue_max_segment_size(rq, PAGE_SIZE); + + /* Ensure a merged request will fit in a single I/O ring slot. */ + blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG); + + /* Make sure buffer addresses are sector-aligned. */ + blk_queue_dma_alignment(rq, 511); + + /* Make sure we don't use bounce buffers.
*/ + blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); +} + static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, - unsigned int physical_sector_size, - unsigned int segments) + unsigned int physical_sector_size) { struct request_queue *rq; struct blkfront_info *info = gd->private_data; @@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, } rq->queuedata = info; - queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); - - if (info->feature_discard) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); - blk_queue_max_discard_sectors(rq, get_capacity(gd)); - rq->limits.discard_granularity = info->discard_granularity; - rq->limits.discard_alignment = info->discard_alignment; - if (info->feature_secdiscard) - queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); - } - - /* Hard sector size and max sectors impersonate the equiv. hardware. */ - blk_queue_logical_block_size(rq, sector_size); - blk_queue_physical_block_size(rq, physical_sector_size); - blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512); - - /* Each segment in a request is up to an aligned page in size. */ - blk_queue_segment_boundary(rq, PAGE_SIZE - 1); - blk_queue_max_segment_size(rq, PAGE_SIZE); - - /* Ensure a merged request will fit in a single I/O ring slot. */ - blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG); - - /* Make sure buffer addresses are sector-aligned. */ - blk_queue_dma_alignment(rq, 511); - - /* Make sure we don't use bounce buffers. */ - blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); - - gd->queue = rq; + info->rq = gd->queue = rq; + info->gd = gd; + info->sector_size = sector_size; + info->physical_sector_size = physical_sector_size; + blkif_set_queue_limits(info); return 0; } @@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, gd->private_data = info; set_capacity(gd, capacity); - if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size, - info->max_indirect_segments ? : - BLKIF_MAX_SEGMENTS_PER_REQUEST)) { + if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) { del_gendisk(gd); goto release; } - info->rq = gd->queue; - info->gd = gd; - xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) @@ -2007,6 +2015,8 @@ static int blkif_recover(struct blkfront_info *info) struct split_bio *split_bio; blkfront_gather_backend_features(info); + /* Reset limits changed by blk_mq_update_nr_hw_queues(). */ + blkif_set_queue_limits(info); segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG); From 4e876c2bd37fbb5c37a4554a79cf979d486f0e82 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Wed, 27 Jul 2016 17:42:04 +0800 Subject: [PATCH 09/18] xen-blkfront: free resources if xlvbd_alloc_gendisk fails The current code forgets to free resources in the failure path of xlvbd_alloc_gendisk(); this patch fixes it.
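Schematically, the fix routes the failure through the usual kernel unwind label instead of returning early (a simplified sketch mirroring the hunk below; the xlvbd_alloc_gendisk() argument list is abbreviated):

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		goto fail;		/* previously a bare return */
	}
	xenbus_switch_state(info->xbdev, XenbusStateConnected);
	return;
fail:
	blkif_free(info, 0);		/* tear down rings and grants */
	return;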
Signed-off-by: Bob Liu Signed-off-by: Konrad Rzeszutek Wilk --- drivers/block/xen-blkfront.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index f84e220a26e6..88ef6d4729b4 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2442,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info) if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", info->xbdev->otherend); - return; + goto fail; } xenbus_switch_state(info->xbdev, XenbusStateConnected); @@ -2455,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info) device_add_disk(&info->xbdev->dev, info->gd); info->is_ready = 1; + return; + +fail: + blkif_free(info, 0); + return; } /** From e9e5e3fae8da7e237049e00e0bfc9e32fd808fe8 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Mon, 22 Aug 2016 12:47:43 +0200 Subject: [PATCH 10/18] bdev: fix NULL pointer dereference I got this: kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] PREEMPT SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) CPU: 0 PID: 5505 Comm: syz-executor Not tainted 4.8.0-rc2+ #161 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3-0-ge2fc41e-prebuilt.qemu-project.org 04/01/2014 task: ffff880113415940 task.stack: ffff880118350000 RIP: 0010:[] [] bd_mount+0x52/0xa0 RSP: 0018:ffff880118357ca0 EFLAGS: 00010207 RAX: dffffc0000000000 RBX: ffffffffffffffff RCX: ffffc90000bb6000 RDX: 0000000000000018 RSI: ffffffff846d6b20 RDI: 00000000000000c7 RBP: ffff880118357cb0 R08: ffff880115967c68 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8801188211e8 R13: ffffffff847baa20 R14: ffff8801139cb000 R15: 0000000000000080 FS: 00007fa3ff6c0700(0000) GS:ffff88011aa00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fc1d8cc7e78 CR3: 0000000109f20000 CR4: 00000000000006f0 DR0: 000000000000001e DR1: 000000000000001e DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000600 Stack: ffff880112cfd6c0 ffff8801188211e8 ffff880118357cf0 ffffffff8167f207 ffffffff816d7a1e ffff880112a413c0 ffffffff847baa20 ffff8801188211e8 0000000000000080 ffff880112cfd6c0 ffff880118357d38 ffffffff816dce0a Call Trace: [] mount_fs+0x97/0x2e0 [] ? alloc_vfsmnt+0x55e/0x760 [] vfs_kern_mount+0x7a/0x300 [] ? _raw_read_unlock+0x2c/0x50 [] do_mount+0x3d7/0x2730 [] ? trace_do_page_fault+0x1f4/0x3a0 [] ? copy_mount_string+0x40/0x40 [] ? memset+0x31/0x40 [] ? copy_mount_options+0x1ee/0x320 [] SyS_mount+0xb2/0x120 [] ? copy_mnt_ns+0x970/0x970 [] do_syscall_64+0x1c4/0x4e0 [] entry_SYSCALL64_slow_path+0x25/0x25 Code: 83 e8 63 1b fc ff 48 85 c0 48 89 c3 74 4c e8 56 35 d1 ff 48 8d bb c8 00 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <80> 3c 02 00 75 36 4c 8b a3 c8 00 00 00 48 b8 00 00 00 00 00 fc RIP [] bd_mount+0x52/0xa0 RSP ---[ end trace 13690ad962168b98 ]--- mount_pseudo() returns ERR_PTR(), not NULL, on error. 
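In miniature, the rule the oops demonstrates (a generic sketch, not the patch itself): a helper documented to return ERR_PTR() must be checked with IS_ERR(), because an error pointer is non-NULL and a NULL test lets it flow into the dereference above:

static struct dentry *bd_mount_sketch(struct file_system_type *fs_type)
{
	struct dentry *dent;

	dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC);
	if (IS_ERR(dent))
		return dent;	/* propagate the encoded error as-is */
	dent->d_sb->s_iflags |= SB_I_CGROUPWB;	/* only touch a real dentry */
	return dent;
}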
Fixes: 3684aa7099e0 ("block-dev: enable writeback cgroup support") Cc: Shaohua Li Cc: Tejun Heo Cc: Jens Axboe Cc: stable@vger.kernel.org Signed-off-by: Vegard Nossum Signed-off-by: Jens Axboe --- fs/block_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/block_dev.c b/fs/block_dev.c index c3cdde87cc8c..e17bdbdfe9b1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -646,7 +646,7 @@ static struct dentry *bd_mount(struct file_system_type *fs_type, { struct dentry *dent; dent = mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, BDEVFS_MAGIC); - if (dent) + if (!IS_ERR(dent)) dent->d_sb->s_iflags |= SB_I_CGROUPWB; return dent; } From 9b47f77a680447e0132b2cf7fb82374e014bec1c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 24 Aug 2016 03:52:12 -0700 Subject: [PATCH 11/18] nvme: Fix nvme_get/set_features() with a NULL result pointer nvme_set_features() callers seem to expect that passing NULL as the result pointer is acceptable. Teach nvme_set_features() not to try to write to the NULL address. For symmetry, make the same change to nvme_get_features(), despite the fact that all current callers pass a valid result pointer. I assume that this bug hasn't been reported in practice because the callers that pass NULL are all in the SCSI translation layer and no one uses the relevant operations. Cc: stable@vger.kernel.org Signed-off-by: Andy Lutomirski Reviewed-by: Sagi Grimberg Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7f75d661237f..2feacc70bf61 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -611,7 +611,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, NVME_QID_ANY, 0, 0); - if (ret >= 0) + if (ret >= 0 && result) *result = le32_to_cpu(cqe.result); return ret; } @@ -631,7 +631,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, NVME_QID_ANY, 0, 0); - if (ret >= 0) + if (ret >= 0 && result) *result = le32_to_cpu(cqe.result); return ret; } From 4d70dca4eadf2f95abe389116ac02b8439c2d16c Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 23 Aug 2016 21:49:45 +0800 Subject: [PATCH 12/18] block: make sure a big bio is split into at most 256 bvecs After arbitrary bio size was introduced, the incoming bio may be very big. We have to split the bio into small bios so that each holds at most BIO_MAX_PAGES bvecs for safety reason, such as bio_clone(). This patch fixes the following kernel crash: > [ 172.660142] BUG: unable to handle kernel NULL pointer dereference at 0000000000000028 > [ 172.660229] IP: [] bio_trim+0xf/0x2a > [ 172.660289] PGD 7faf3e067 PUD 7f9279067 PMD 0 > [ 172.660399] Oops: 0000 [#1] SMP > [...] > [ 172.664780] Call Trace: > [ 172.664813] [] ? raid1_make_request+0x2e8/0xad7 [raid1] > [ 172.664846] [] ? blk_queue_split+0x377/0x3d4 > [ 172.664880] [] ? md_make_request+0xf6/0x1e9 [md_mod] > [ 172.664912] [] ? generic_make_request+0xb5/0x155 > [ 172.664947] [] ? prio_io+0x85/0x95 [bcache] > [ 172.664981] [] ? register_cache_set+0x355/0x8d0 [bcache] > [ 172.665016] [] ? 
register_bcache+0x1006/0x1174 [bcache] The issue can be reproduced by the following steps: - create one raid1 over two virtio-blk - build bcache device over the above raid1 and another cache device and bucket size is set as 2Mbytes - set cache mode as writeback - run random write over ext4 on the bcache device Fixes: 54efd50(block: make generic_make_request handle arbitrarily sized bios) Reported-by: Sebastian Roesner Reported-by: Eric Wheeler Cc: stable@vger.kernel.org (4.3+) Cc: Shaohua Li Acked-by: Kent Overstreet Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-merge.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/block/blk-merge.c b/block/blk-merge.c index 72627e3cf91e..2642e5fc8b69 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -94,8 +94,30 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, bool do_split = true; struct bio *new = NULL; const unsigned max_sectors = get_max_io_size(q, bio); + unsigned bvecs = 0; bio_for_each_segment(bv, bio, iter) { + /* + * With arbitrary bio size, the incoming bio may be very + * big. We have to split the bio into small bios so that + * each holds at most BIO_MAX_PAGES bvecs because + * bio_clone() can fail to allocate big bvecs. + * + * It should have been better to apply the limit per + * request queue in which bio_clone() is involved, + * instead of globally. The biggest blocker is the + * bio_clone() in bio bounce. + * + * If bio is splitted by this reason, we should have + * allowed to continue bios merging, but don't do + * that now for making the change simple. + * + * TODO: deal with bio bounce's bio_clone() gracefully + * and convert the global limit into per-queue limit. + */ + if (bvecs++ >= BIO_MAX_PAGES) + goto split; + /* * If the queue doesn't support SG gaps and adding this * offset would create a gap, disallow it. From e57690fe009b2ab0cee8a57f53be634540e49c9d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 24 Aug 2016 15:34:35 -0600 Subject: [PATCH 13/18] blk-mq: don't overwrite rq->mq_ctx We do this in a few places, if the CPU is offline. This isn't allowed, though, since on multi queue hardware, we can't just move a request from one software queue to another, if they map to different hardware queues. The request and tag isn't valid on another hardware queue. This can happen if plugging races with CPU offlining. But it does no harm, since it can only happen in the window where we are currently busy freezing the queue and flushing IO, in preparation for redoing the software <-> hardware queue mappings. 
Signed-off-by: Jens Axboe --- block/blk-mq.c | 55 +++++++++++++++----------------------------------- 1 file changed, 16 insertions(+), 39 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index e931a0e8e73d..729169d022fc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1036,10 +1036,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) EXPORT_SYMBOL(blk_mq_delay_queue); static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, - struct blk_mq_ctx *ctx, struct request *rq, bool at_head) { + struct blk_mq_ctx *ctx = rq->mq_ctx; + trace_block_rq_insert(hctx->queue, rq); if (at_head) @@ -1053,20 +1054,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, { struct blk_mq_ctx *ctx = rq->mq_ctx; - __blk_mq_insert_req_list(hctx, ctx, rq, at_head); + __blk_mq_insert_req_list(hctx, rq, at_head); blk_mq_hctx_mark_pending(hctx, ctx); } void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, - bool async) + bool async) { + struct blk_mq_ctx *ctx = rq->mq_ctx; struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; - - current_ctx = blk_mq_get_ctx(q); - if (!cpu_online(ctx->cpu)) - rq->mq_ctx = ctx = current_ctx; hctx = q->mq_ops->map_queue(q, ctx->cpu); @@ -1076,8 +1073,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, if (run_queue) blk_mq_run_hw_queue(hctx, async); - - blk_mq_put_ctx(current_ctx); } static void blk_mq_insert_requests(struct request_queue *q, @@ -1088,14 +1083,9 @@ static void blk_mq_insert_requests(struct request_queue *q, { struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *current_ctx; trace_block_unplug(q, depth, !from_schedule); - current_ctx = blk_mq_get_ctx(q); - - if (!cpu_online(ctx->cpu)) - ctx = current_ctx; hctx = q->mq_ops->map_queue(q, ctx->cpu); /* @@ -1107,15 +1097,14 @@ static void blk_mq_insert_requests(struct request_queue *q, struct request *rq; rq = list_first_entry(list, struct request, queuelist); + BUG_ON(rq->mq_ctx != ctx); list_del_init(&rq->queuelist); - rq->mq_ctx = ctx; - __blk_mq_insert_req_list(hctx, ctx, rq, false); + __blk_mq_insert_req_list(hctx, rq, false); } blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); blk_mq_run_hw_queue(hctx, from_schedule); - blk_mq_put_ctx(current_ctx); } static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) @@ -1630,16 +1619,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) return 0; } +/* + * 'cpu' is going away. splice any existing rq_list entries from this + * software queue to the hw queue dispatch list, and ensure that it + * gets run. + */ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) { - struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; LIST_HEAD(tmp); - /* - * Move ctx entries to new CPU, if this one is going away. 
- */ - ctx = __blk_mq_get_ctx(q, cpu); + ctx = __blk_mq_get_ctx(hctx->queue, cpu); spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_list)) { @@ -1651,24 +1641,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) if (list_empty(&tmp)) return NOTIFY_OK; - ctx = blk_mq_get_ctx(q); - spin_lock(&ctx->lock); - - while (!list_empty(&tmp)) { - struct request *rq; - - rq = list_first_entry(&tmp, struct request, queuelist); - rq->mq_ctx = ctx; - list_move_tail(&rq->queuelist, &ctx->rq_list); - } - - hctx = q->mq_ops->map_queue(q, ctx->cpu); - blk_mq_hctx_mark_pending(hctx, ctx); - - spin_unlock(&ctx->lock); + spin_lock(&hctx->lock); + list_splice_tail_init(&tmp, &hctx->dispatch); + spin_unlock(&hctx->lock); blk_mq_run_hw_queue(hctx, true); - blk_mq_put_ctx(ctx); return NOTIFY_OK; } From 0e87e58bf60edb6bb28e493c7a143f41b091a5e5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 24 Aug 2016 15:38:01 -0600 Subject: [PATCH 14/18] blk-mq: improve warning for running a queue on the wrong CPU __blk_mq_run_hw_queue() currently warns if we are running the queue on a CPU that isn't set in its mask. However, this can happen if a CPU is being offlined, and the workqueue handling will place the work on CPU0 instead. Improve the warning so that it only triggers if the batch cpu in the hardware queue is currently online. If it triggers for that case, then it's indicative of a flow problem in blk-mq, so we want to retain it for that case. Signed-off-by: Jens Axboe --- block/blk-mq.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 729169d022fc..13f5a6c1de76 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -793,11 +793,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) struct list_head *dptr; int queued; - WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); - if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; + WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && + cpu_online(hctx->next_cpu)); + hctx->run++; /* From 5bb53c0fb8e0fc2e34287d5d0fcadc784de913e1 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Tue, 23 Aug 2016 18:55:31 +0300 Subject: [PATCH 15/18] fs/block_dev: fix potential NULL ptr deref in freeze_bdev() If freeze_bdev() is called twice on the same block device without a mounted filesystem, get_super() will return NULL, which will lead to a NULL-ptr dereference later in drop_super(). Check the result of get_super() to fix that. Note that this is a purely theoretical issue. We have only 3 freeze_bdev() callers. 2 of them are in filesystem code and are used on a device with a mounted fs. The third one, in lock_fs(), has protection in upper-layer code against freezing a block device a second time without thawing it first. Signed-off-by: Andrey Ryabinin Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/block_dev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/block_dev.c b/fs/block_dev.c index e17bdbdfe9b1..08ae99343d92 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -249,7 +249,8 @@ struct super_block *freeze_bdev(struct block_device *bdev) * thaw_bdev drops it.
*/ sb = get_super(bdev); - drop_super(sb); + if (sb) + drop_super(sb); mutex_unlock(&bdev->bd_fsfreeze_mutex); return sb; } From 468c298ad3ed3f0d94a65f8ca00f6bfc6c2b4e33 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 25 Aug 2016 08:56:44 -0600 Subject: [PATCH 16/18] Revert "floppy: fix open(O_ACCMODE) for ioctl-only open" This reverts commit ff06db1efb2ad6db06eb5b99b88a0c15a9cc9b0e. --- drivers/block/floppy.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b71a9c767009..c557057fe8ae 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) opened_bdev[drive] = bdev; + if (!(mode & (FMODE_READ|FMODE_WRITE))) { + res = -EINVAL; + goto out; + } + res = -ENXIO; if (!floppy_track_buffer) { @@ -3706,15 +3711,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (UFDCS->rawcmd == 1) UFDCS->rawcmd = 2; - if (mode & (FMODE_READ|FMODE_WRITE)) { - UDRS->last_checked = 0; - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); - check_disk_change(bdev); - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) - goto out; - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) - goto out; - } + UDRS->last_checked = 0; + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + check_disk_change(bdev); + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) + goto out; + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) + goto out; res = -EROFS; From f2791e7eadf437633f30faa51b30878cf15650be Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 25 Aug 2016 08:56:51 -0600 Subject: [PATCH 17/18] Revert "floppy: refactor open() flags handling" This reverts commit 09954bad448791ef01202351d437abdd9497a804. 
--- drivers/block/floppy.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index c557057fe8ae..e3d8e4ced4a2 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3663,11 +3663,6 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) opened_bdev[drive] = bdev; - if (!(mode & (FMODE_READ|FMODE_WRITE))) { - res = -EINVAL; - goto out; - } - res = -ENXIO; if (!floppy_track_buffer) { @@ -3711,20 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) if (UFDCS->rawcmd == 1) UFDCS->rawcmd = 2; - UDRS->last_checked = 0; - clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); - check_disk_change(bdev); - if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) - goto out; - if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) - goto out; - - res = -EROFS; - - if ((mode & FMODE_WRITE) && - !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) - goto out; - + if (!(mode & FMODE_NDELAY)) { + if (mode & (FMODE_READ|FMODE_WRITE)) { + UDRS->last_checked = 0; + clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); + check_disk_change(bdev); + if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) + goto out; + if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) + goto out; + } + res = -EROFS; + if ((mode & FMODE_WRITE) && + !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) + goto out; + } mutex_unlock(&open_lock); mutex_unlock(&floppy_mutex); return 0; From 869c554808ccf7ddd25be5317073b88ceddb8507 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 25 Aug 2016 14:11:43 -0600 Subject: [PATCH 18/18] mmc: fix use-after-free of struct request We call mmc_req_is_special() after having processed a request, but it could be freed after that. Check that ahead of time, and use the cached value. Reported-by: Hans de Goede Tested-by: Hans de Goede Fixes: c2df40dfb8c0 ("drivers: use req op accessor") Signed-off-by: Jens Axboe --- drivers/mmc/card/block.c | 4 ++-- drivers/mmc/card/queue.c | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 82503e6f04b3..2206d4477dbb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2151,6 +2151,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) struct mmc_card *card = md->queue.card; struct mmc_host *host = card->host; unsigned long flags; + bool req_is_special = mmc_req_is_special(req); if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ @@ -2191,8 +2192,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) } out: - if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || - mmc_req_is_special(req)) + if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special) /* * Release host when there are no more requests * and after special request(discard, flush) is done. diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 29578e98603d..708057261b38 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -65,6 +65,8 @@ static int mmc_queue_thread(void *d) spin_unlock_irq(q->queue_lock); if (req || mq->mqrq_prev->req) { + bool req_is_special = mmc_req_is_special(req); + set_current_state(TASK_RUNNING); mq->issue_fn(mq, req); cond_resched(); @@ -80,7 +82,7 @@ static int mmc_queue_thread(void *d) * has been finished. Do not assign it to previous * request. 
*/ - if (mmc_req_is_special(req)) + if (req_is_special) mq->mqrq_cur->req = NULL; mq->mqrq_prev->brq.mrq.data = NULL;
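Both hunks above apply the same discipline, shown in miniature (a generic sketch): evaluate any predicate on a request before the call that may complete and free it, then rely only on the cached result:

	bool req_is_special = mmc_req_is_special(req);	/* req still valid */

	mq->issue_fn(mq, req);		/* may complete and free req */

	if (req_is_special)		/* safe: req is not dereferenced */
		mq->mqrq_cur->req = NULL;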