block-5.12-2021-02-27
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmA6njIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgprolD/9zWti9LsZvA7yE+PhVwrwF3CsNzLfQlClw
99HaA7HxtAc/VLJrnD/SubhCAPdBC5B2xPv6faajdwF2iUR3Rr1Uc93CQ3uP2KKq
kvm6ALTpzPTMI6YSABhY74sg9BkkoDbMo54JQYVQPleiE+5eDLbuFZck6ObfUHyY
a4aaImlndWp/t14GzrClL4hucF+5KJy846P+QCVclkh0yl8xSsqZ5LIFU7tu3iQb
HpZ5HKLT/2ma/EOr3wknnsIe97AUZQU0q5aMparhYlm+qR511eop3QXx850FL/oC
tEGceKLij6qazmkiocKVzML8Fs+Y9/a4vCMjLCScWJmzDlmKdlH2uudeahN6b9Hm
15qRQHOjl1Hc2bdr5ZVn87nq9RWhSm18C+SRMwOKHCOnEhwxqM3RjRfAgj4BJ6QB
PFbFqdY+8Y1YLPFmn9hph72ePaEcN4L2IXW6TI/WX8mot8ODAnkq9Hr38dKwzO+i
0mon6DVyJKKho6XwvVu5IYurkR2beQprjeVUxwZjjT6DxUgsc+J6itK5LDHFSkeZ
qZlXn5Di8MkiXg0DFJYDQiFXnO0Z5GlRWOGPVfBaOr3x+1dqzDdHGw4oz1oGqvnr
GNNYCsYIpDGm7eauX5lqL5MUFpjqRCceXy5JSHPhnWWw617nYkr4H9jdsV9HiTX1
tQFx05QW3w==
=ccMs
-----END PGP SIGNATURE-----

Merge tag 'block-5.12-2021-02-27' of git://git.kernel.dk/linux-block

Pull more block updates from Jens Axboe:
 "A few stragglers (and one due to me missing it originally), and fixes
  for changes in this merge window mostly. In particular:

   - blktrace cleanups (Chaitanya, Greg)
   - Kill dead blk_pm_* functions (Bart)
   - Fixes for the bio alloc changes (Christoph)
   - Fix for the partition changes (Christoph, Ming)
   - Fix for turning off iopoll with polled IO inflight (Jeffle)
   - nbd disconnect fix (Josef)
   - loop fsync error fix (Mauricio)
   - kyber update depth fix (Yang)
   - max_sectors alignment fix (Mikulas)
   - Add bio_max_segs helper (Matthew)"

* tag 'block-5.12-2021-02-27' of git://git.kernel.dk/linux-block: (21 commits)
  block: Add bio_max_segs
  blktrace: fix documentation for blk_fill_rw()
  block: memory allocations in bounce_clone_bio must not fail
  block: remove the gfp_mask argument to bounce_clone_bio
  block: fix bounce_clone_bio for passthrough bios
  block-crypto-fallback: use a bio_set for splitting bios
  block: fix logging on capacity change
  blk-settings: align max_sectors on "logical_block_size" boundary
  block: reopen the device in blkdev_reread_part
  block: don't skip empty device in in disk_uevent
  blktrace: remove debugfs file dentries from struct blk_trace
  nbd: handle device refs for DESTROY_ON_DISCONNECT properly
  kyber: introduce kyber_depth_updated()
  loop: fix I/O error on fsync() in detached loop devices
  block: fix potential IO hang when turning off io_poll
  block: get rid of the trace rq insert wrapper
  blktrace: fix blk_rq_merge documentation
  blktrace: fix blk_rq_issue documentation
  blktrace: add blk_fill_rwbs documentation comment
  block: remove superfluous param in blk_fill_rwbs()
  ...
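Most of the conversions in this pull collapse an open-coded clamp against BIO_MAX_PAGES into the new bio_max_segs() helper. As a quick illustration of the pattern (a userspace sketch, not kernel code; the ternary stands in for the kernel's min() macro):

#include <stdio.h>

#define BIO_MAX_PAGES 256U

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	/* replaces open-coded clamps such as min_t(int, nr, BIO_MAX_PAGES) */
	return nr_segs < BIO_MAX_PAGES ? nr_segs : BIO_MAX_PAGES;
}

int main(void)
{
	printf("%u %u\n", bio_max_segs(17), bio_max_segs(1024)); /* 17 256 */
	return 0;
}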
commit 3ab6608e66
block/bfq-iosched.c
@@ -125,6 +125,8 @@
 #include <linux/delay.h>
 #include <linux/backing-dev.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
@@ -5621,7 +5623,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	spin_unlock_irq(&bfqd->lock);
 
-	blk_mq_sched_request_inserted(rq);
+	trace_block_rq_insert(rq);
 
 	spin_lock_irq(&bfqd->lock);
 	bfqq = bfq_init_rq(rq);
block/blk-core.c
@@ -59,6 +59,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
 
 DEFINE_IDA(blk_queue_ida);
 
block/blk-crypto-fallback.c
@@ -80,6 +80,7 @@ static struct blk_crypto_keyslot {
 static struct blk_keyslot_manager blk_crypto_ksm;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
+static struct bio_set crypto_bio_split;
 
 /*
  * This is the key we set when evicting a keyslot. This *should* be the all 0's
@@ -224,7 +225,8 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 	if (num_sectors < bio_sectors(bio)) {
 		struct bio *split_bio;
 
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
+		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
+				      &crypto_bio_split);
 		if (!split_bio) {
 			bio->bi_status = BLK_STS_RESOURCE;
 			return false;
@@ -538,9 +540,13 @@ static int blk_crypto_fallback_init(void)
 
 	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
 
-	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+	err = bioset_init(&crypto_bio_split, 64, 0, 0);
 	if (err)
 		goto out;
+
+	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_bioset;
 	err = -ENOMEM;
 
 	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
@@ -591,6 +597,8 @@ fail_free_wq:
 	destroy_workqueue(blk_crypto_wq);
 fail_free_ksm:
 	blk_ksm_destroy(&blk_crypto_ksm);
+fail_free_bioset:
+	bioset_exit(&crypto_bio_split);
out:
 	return err;
 }
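The new fail_free_bioset label slots into the existing unwind ladder so resources are released in reverse order of acquisition. The same idiom in isolation (a minimal userspace sketch; acquire/release names are stand-ins for bioset_init()/bioset_exit() and blk_ksm_init(), not kernel API):

static int acquire_a(void) { return 0; }	/* bioset_init() in the real code */
static int acquire_b(void) { return 0; }	/* blk_ksm_init() */
static void release_a(void) { }			/* bioset_exit() */

static int fallback_init(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto fail_free_a;	/* unwind only what succeeded */
	return 0;

fail_free_a:
	release_a();
out:
	return err;
}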
block/blk-map.c
@@ -150,9 +150,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	bmd->is_our_pages = !map_data;
 	bmd->is_null_mapped = (map_data && map_data->null_mapped);
 
-	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
-	if (nr_pages > BIO_MAX_PAGES)
-		nr_pages = BIO_MAX_PAGES;
+	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
 
 	ret = -ENOMEM;
 	bio = bio_kmalloc(gfp_mask, nr_pages);
block/blk-mq-sched.c
@@ -384,12 +384,6 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
 
-void blk_mq_sched_request_inserted(struct request *rq)
-{
-	trace_block_rq_insert(rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
-
 static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 				       bool has_sched,
 				       struct request *rq)

block/blk-mq-sched.h
@@ -7,7 +7,6 @@
 
 void blk_mq_sched_assign_ioc(struct request *rq);
 
-void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
block/blk-pm.h
@@ -21,31 +21,6 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
 	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		pm_runtime_mark_last_busy(rq->q->dev);
 }
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
-	lockdep_assert_held(&rq->q->queue_lock);
-
-	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
-		rq->q->nr_pending--;
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
-				      struct request *rq)
-{
-	lockdep_assert_held(&q->queue_lock);
-
-	if (q->dev && !(rq->rq_flags & RQF_PM))
-		q->nr_pending++;
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
-	lockdep_assert_held(&rq->q->queue_lock);
-
-	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
-		--rq->q->nr_pending;
-}
 #else
 static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 {
@@ -55,19 +30,6 @@ static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 static inline void blk_pm_mark_last_busy(struct request *rq)
 {
 }
-
-static inline void blk_pm_requeue_request(struct request *rq)
-{
-}
-
-static inline void blk_pm_add_request(struct request_queue *q,
-				      struct request *rq)
-{
-}
-
-static inline void blk_pm_put_request(struct request *rq)
-{
-}
 #endif
 
 #endif /* _BLOCK_BLK_PM_H_ */
block/blk-settings.c
@@ -504,6 +504,14 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
 
+static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
+{
+	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
+	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
+		sectors = PAGE_SIZE >> SECTOR_SHIFT;
+	return sectors;
+}
+
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
@@ -630,6 +638,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		ret = -1;
 	}
 
+	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
+	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
+	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
+
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
 		alignment = queue_limit_discard_alignment(b, start);
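blk_round_down_sectors() aligns a sector limit down to a whole number of logical blocks, with a floor of one page worth of sectors. The arithmetic, checked as a runnable sketch (assuming 4K pages and 512-byte sectors, as on most x86 systems):

#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SIZE 4096U
/* round down to a power-of-two boundary, as the kernel macro does */
#define round_down(x, y) ((x) & ~((y) - 1))

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

int main(void)
{
	/* 4096-byte logical blocks are 8 sectors: 1029 rounds down to 1024 */
	printf("%u\n", blk_round_down_sectors(1029, 4096)); /* 1024 */
	/* never below one page worth of sectors */
	printf("%u\n", blk_round_down_sectors(3, 4096));    /* 8 */
	return 0;
}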
block/blk-sysfs.c
@@ -434,10 +434,13 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	if (ret < 0)
 		return ret;
 
-	if (poll_on)
+	if (poll_on) {
 		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
-	else
+	} else {
+		blk_mq_freeze_queue(q);
 		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+		blk_mq_unfreeze_queue(q);
+	}
 
 	return ret;
 }
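This is the "potential IO hang when turning off io_poll" fix from the pull: polled bios are never completed by interrupt, so clearing QUEUE_FLAG_POLL while polled I/O is still in flight could strand it. Restated schematically (a sketch of the ordering using the kernel calls from the hunk above, not additional kernel code):

static void disable_polling(struct request_queue *q)
{
	blk_mq_freeze_queue(q);			/* drain all in-flight I/O first */
	blk_queue_flag_clear(QUEUE_FLAG_POLL, q); /* now nothing depends on polling */
	blk_mq_unfreeze_queue(q);
}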
block/bounce.c
@@ -214,8 +214,7 @@ static void bounce_end_io_read_isa(struct bio *bio)
 	__bounce_end_io_read(bio, &isa_page_pool);
 }
 
-static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
-		struct bio_set *bs)
+static struct bio *bounce_clone_bio(struct bio *bio_src)
 {
 	struct bvec_iter iter;
 	struct bio_vec bv;
@@ -242,10 +241,12 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
 	 *    asking for trouble and would force extra work on
 	 *    __bio_clone_fast() anyways.
 	 */
-
-	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
-	if (!bio)
-		return NULL;
+	if (bio_is_passthrough(bio_src))
+		bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
+				  bio_segments(bio_src));
+	else
+		bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
+				       &bounce_bio_set);
 	bio->bi_bdev = bio_src->bi_bdev;
 	if (bio_flagged(bio_src, BIO_REMAPPED))
 		bio_set_flag(bio, BIO_REMAPPED);
@@ -269,11 +270,11 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
 			break;
 	}
 
-	if (bio_crypt_clone(bio, bio_src, gfp_mask) < 0)
+	if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
 		goto err_put;
 
 	if (bio_integrity(bio_src) &&
-	    bio_integrity_clone(bio, bio_src, gfp_mask) < 0)
+	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
 		goto err_put;
 
 	bio_clone_blkg_association(bio, bio_src);
@@ -296,7 +297,6 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	unsigned i = 0;
 	bool bounce = false;
 	int sectors = 0;
-	bool passthrough = bio_is_passthrough(*bio_orig);
 
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_PAGES)
@@ -307,14 +307,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bounce)
 		return;
 
-	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
+	if (!bio_is_passthrough(*bio_orig) &&
+	    sectors < bio_sectors(*bio_orig)) {
 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
 		bio_chain(bio, *bio_orig);
 		submit_bio_noacct(*bio_orig);
 		*bio_orig = bio;
 	}
-	bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
-			&bounce_bio_set);
+	bio = bounce_clone_bio(*bio_orig);
 
 	/*
	 * Bvec table can't be updated by bio_for_each_segment_all(),
block/genhd.c
@@ -74,7 +74,7 @@ bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
 		return false;
 
 	pr_info("%s: detected capacity change from %lld to %lld\n",
-		disk->disk_name, size, capacity);
+		disk->disk_name, capacity, size);
 
 	/*
	 * Historically we did not send a uevent for changes to/from an empty
@@ -476,7 +476,7 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
 	struct disk_part_iter piter;
 	struct block_device *part;
 
-	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
+	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY_PART0);
 	while ((part = disk_part_iter_next(&piter)))
 		kobject_uevent(bdev_kobj(part), action);
 	disk_part_iter_exit(&piter);
block/ioctl.c
@@ -81,20 +81,27 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
 }
 #endif
 
-static int blkdev_reread_part(struct block_device *bdev)
+static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
 {
-	int ret;
+	struct block_device *tmp;
 
 	if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
 		return -EINVAL;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	mutex_lock(&bdev->bd_mutex);
-	ret = bdev_disk_changed(bdev, false);
-	mutex_unlock(&bdev->bd_mutex);
+	/*
+	 * Reopen the device to revalidate the driver state and force a
+	 * partition rescan.
+	 */
+	mode &= ~FMODE_EXCL;
+	set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 
-	return ret;
+	tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+	blkdev_put(tmp, mode);
+	return 0;
 }
 
 static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
@@ -498,7 +505,7 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
 		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKRRPART:
-		return blkdev_reread_part(bdev);
+		return blkdev_reread_part(bdev, mode);
 	case BLKTRACESTART:
 	case BLKTRACESTOP:
 	case BLKTRACETEARDOWN:
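From userspace nothing changes: BLKRRPART is still the standard "rescan partitions" ioctl; only its in-kernel implementation now reopens the device. Minimal (real) usage, which tools like partprobe issue internally:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* pass a whole-disk node, e.g. /dev/sdb; requires CAP_SYS_ADMIN */
	int fd = open(argc > 1 ? argv[1] : "/dev/sda", O_RDONLY);

	if (fd < 0 || ioctl(fd, BLKRRPART) < 0)
		perror("BLKRRPART");
	if (fd >= 0)
		close(fd);
	return 0;
}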
block/kyber-iosched.c
@@ -13,6 +13,8 @@
 #include <linux/module.h>
 #include <linux/sbitmap.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -353,19 +355,9 @@ static void kyber_timer_fn(struct timer_list *t)
 	}
 }
 
-static unsigned int kyber_sched_tags_shift(struct request_queue *q)
-{
-	/*
-	 * All of the hardware queues have the same depth, so we can just grab
-	 * the shift of the first one.
-	 */
-	return q->queue_hw_ctx[0]->sched_tags->bitmap_tags->sb.shift;
-}
-
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
-	unsigned int shift;
 	int ret = -ENOMEM;
 	int i;
 
@@ -400,9 +392,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		kqd->latency_targets[i] = kyber_latency_targets[i];
 	}
 
-	shift = kyber_sched_tags_shift(q);
-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
 	return kqd;
 
 err_buckets:
@@ -458,9 +447,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 		INIT_LIST_HEAD(&kcq->rq_list[i]);
 }
 
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int shift = tags->bitmap_tags->sb.shift;
+
+	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
 	struct kyber_hctx_data *khd;
 	int i;
 
@@ -502,8 +501,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 	khd->batching = 0;
 
 	hctx->sched_data = khd;
-	sbitmap_queue_min_shallow_depth(hctx->sched_tags->bitmap_tags,
-					kqd->async_depth);
+	kyber_depth_updated(hctx);
 
 	return 0;
 
@@ -602,7 +600,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 			list_move_tail(&rq->queuelist, head);
 			sbitmap_set_bit(&khd->kcq_map[sched_domain],
 					rq->mq_ctx->index_hw[hctx->type]);
-			blk_mq_sched_request_inserted(rq);
+			trace_block_rq_insert(rq);
 			spin_unlock(&kcq->lock);
 		}
 	}
@@ -1022,6 +1020,7 @@ static struct elevator_type kyber_sched = {
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
+		.depth_updated = kyber_depth_updated,
 	},
 #ifdef CONFIG_BLK_DEBUG_FS
 	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
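The point of the fix: async_depth was computed once at queue_data_alloc() time, so a later nr_requests update left it stale; hooking .depth_updated recomputes it from the current sbitmap shift. The arithmetic itself, as a runnable check (KYBER_ASYNC_PERCENT is 75 in-tree, quoted here from memory):

#include <stdio.h>

#define KYBER_ASYNC_PERCENT 75 /* in-tree value; quoted from memory */

int main(void)
{
	/* e.g. a sched_tags sbitmap shift of 6 means 64 tags per word */
	unsigned int shift = 6;
	unsigned int async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	printf("%u\n", async_depth); /* 48: async I/O may use 75%% of 64 tags */
	return 0;
}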
block/mq-deadline.c
@@ -18,6 +18,8 @@
 #include <linux/rbtree.h>
 #include <linux/sbitmap.h>
 
+#include <trace/events/block.h>
+
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -496,7 +498,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	if (blk_mq_sched_try_insert_merge(q, rq))
 		return;
 
-	blk_mq_sched_request_inserted(rq);
+	trace_block_rq_insert(rq);
 
 	if (at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
drivers/block/loop.c
@@ -1212,6 +1212,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 		goto out_unlock;
 	}
 
+	if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
+		blk_queue_write_cache(lo->lo_queue, false, false);
+
 	/* freeze request queue during the transition */
 	blk_mq_freeze_queue(lo->lo_queue);
 
drivers/block/nbd.c
@@ -78,8 +78,7 @@ struct link_dead_args {
 #define NBD_RT_HAS_PID_FILE		3
 #define NBD_RT_HAS_CONFIG_REF		4
 #define NBD_RT_BOUND			5
-#define NBD_RT_DESTROY_ON_DISCONNECT	6
-#define NBD_RT_DISCONNECT_ON_CLOSE	7
+#define NBD_RT_DISCONNECT_ON_CLOSE	6
 
 #define NBD_DESTROY_ON_DISCONNECT	0
 #define NBD_DISCONNECT_REQUESTED	1
@@ -1904,12 +1903,21 @@ again:
 	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
 		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
 		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
-			set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
-				&config->runtime_flags);
-			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
-			put_dev = true;
+			/*
+			 * We have 1 ref to keep the device around, and then 1
+			 * ref for our current operation here, which will be
+			 * inherited by the config.  If we already have
+			 * DESTROY_ON_DISCONNECT set then we know we don't have
+			 * that extra ref already held so we don't need the
+			 * put_dev.
+			 */
+			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+					      &nbd->flags))
+				put_dev = true;
 		} else {
-			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
+			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+					       &nbd->flags))
+				refcount_inc(&nbd->refs);
 		}
 		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
 			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
@@ -2080,15 +2088,13 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
 		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
 		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
-			if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
-					      &config->runtime_flags))
+			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
+					      &nbd->flags))
 				put_dev = true;
-			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
 		} else {
-			if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
-					       &config->runtime_flags))
+			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
+					       &nbd->flags))
 				refcount_inc(&nbd->refs);
-			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
 		}
 
 		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
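The comment in the hunk explains the invariant: exactly one device reference is tied to the DESTROY_ON_DISCONNECT flag, so the ref must move only on the flag's 0->1 and 1->0 transitions, which test_and_set_bit()/test_and_clear_bit() detect atomically. The same idiom in a single-threaded userspace analog (all names here are stand-ins for the demo):

#include <stdbool.h>
#include <stdio.h>

static bool flag;    /* stands in for NBD_DESTROY_ON_DISCONNECT */
static int refs = 1; /* stands in for nbd->refs */

static void set_destroy_on_disconnect(bool on)
{
	if (on) {
		if (!flag) {    /* test_and_set_bit(): act only on 0->1 */
			flag = true;
			refs--; /* one ref is now owned by the flag */
		}
	} else {
		if (flag) {     /* test_and_clear_bit(): act only on 1->0 */
			flag = false;
			refs++; /* take the ref back */
		}
	}
}

int main(void)
{
	set_destroy_on_disconnect(true);
	set_destroy_on_disconnect(true);  /* idempotent: no double decrement */
	set_destroy_on_disconnect(false);
	printf("%d\n", refs); /* 1: balanced regardless of repeats */
	return 0;
}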
drivers/block/xen-blkback/blkback.c
@@ -1326,9 +1326,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 				     pages[i]->page,
 				     seg[i].nsec << 9,
 				     seg[i].offset) == 0)) {
-
-			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
-			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
+			bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
 			if (unlikely(bio == NULL))
 				goto fail_put_bio;
 
drivers/md/dm-io.c
@@ -341,8 +341,8 @@ static void do_region(int op, int op_flags, unsigned region,
 		num_bvecs = 1;
 		break;
 	default:
-		num_bvecs = min_t(int, BIO_MAX_PAGES,
-				  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+		num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
+					 (PAGE_SIZE >> SECTOR_SHIFT)));
 	}
 
 	bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, &io->client->bios);
drivers/md/dm-log-writes.c
@@ -264,15 +264,14 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
 			     size_t entrylen, void *data, size_t datalen,
 			     sector_t sector)
 {
-	int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
+	int bio_pages, pg_datalen, pg_sectorlen, i;
 	struct page *page;
 	struct bio *bio;
 	size_t ret;
 	void *ptr;
 
 	while (datalen) {
-		num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
-		bio_pages = min(num_pages, BIO_MAX_PAGES);
+		bio_pages = bio_max_segs(DIV_ROUND_UP(datalen, PAGE_SIZE));
 
 		atomic_inc(&lc->io_blocks);
 
@@ -364,7 +363,7 @@ static int log_one_block(struct log_writes_c *lc,
 		goto out;
 
 	atomic_inc(&lc->io_blocks);
-	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+	bio = bio_alloc(GFP_KERNEL, bio_max_segs(block->vec_cnt));
 	if (!bio) {
 		DMERR("Couldn't alloc log bio");
 		goto error;
@@ -386,7 +385,8 @@ static int log_one_block(struct log_writes_c *lc,
 		if (ret != block->vecs[i].bv_len) {
 			atomic_inc(&lc->io_blocks);
 			submit_bio(bio);
-			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+			bio = bio_alloc(GFP_KERNEL,
+					bio_max_segs(block->vec_cnt - i));
 			if (!bio) {
 				DMERR("Couldn't alloc log bio");
 				goto error;
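The write_inline_data() hunk is slightly more than a mechanical rename: it also replaces ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT with DIV_ROUND_UP(datalen, PAGE_SIZE). The two page-count expressions are equivalent, which a quick runnable check confirms (assuming 4K pages):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* sweep odd lengths across several page boundaries */
	for (unsigned long len = 1; len < 5 * PAGE_SIZE; len += 511)
		assert(ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT ==
		       DIV_ROUND_UP(len, PAGE_SIZE));
	puts("equivalent");
	return 0;
}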
drivers/nvme/target/io-cmd-bdev.c
@@ -185,7 +185,7 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
 	}
 
 	bip = bio_integrity_alloc(bio, GFP_NOIO,
-		min_t(unsigned int, req->metadata_sg_cnt, BIO_MAX_PAGES));
+		bio_max_segs(req->metadata_sg_cnt));
 	if (IS_ERR(bip)) {
 		pr_err("Unable to allocate bio_integrity_payload\n");
 		return PTR_ERR(bip);
@@ -225,7 +225,7 @@ static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
 
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
-	int sg_cnt = req->sg_cnt;
+	unsigned int sg_cnt = req->sg_cnt;
 	struct bio *bio;
 	struct scatterlist *sg;
 	struct blk_plug plug;
@@ -262,7 +262,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 		bio = &req->b.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
-		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+		bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
 	}
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
@@ -289,7 +289,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 		}
 	}
 
-	bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+	bio = bio_alloc(GFP_KERNEL, bio_max_segs(sg_cnt));
 	bio_set_dev(bio, req->ns->bdev);
 	bio->bi_iter.bi_sector = sector;
 	bio->bi_opf = op;
drivers/nvme/target/passthru.c
@@ -26,7 +26,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
 	u16 status = NVME_SC_SUCCESS;
 	struct nvme_id_ctrl *id;
-	int max_hw_sectors;
+	unsigned int max_hw_sectors;
 	int page_shift;
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
@@ -198,7 +198,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 		bio = &req->p.inline_bio;
 		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
 	} else {
-		bio = bio_alloc(GFP_KERNEL, min(req->sg_cnt, BIO_MAX_PAGES));
+		bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
 		bio->bi_end_io = bio_put;
 	}
 	bio->bi_opf = req_op(rq);
drivers/target/target_core_iblock.c
@@ -315,10 +315,8 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
 	 * Only allocate as many vector entries as the bio code allows us to,
 	 * we'll loop later on until we have handled the whole request.
 	 */
-	if (sg_num > BIO_MAX_PAGES)
-		sg_num = BIO_MAX_PAGES;
-
-	bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
+	bio = bio_alloc_bioset(GFP_NOIO, bio_max_segs(sg_num),
+				&ib_dev->ibd_bio_set);
 	if (!bio) {
 		pr_err("Unable to allocate memory for bio\n");
 		return NULL;
@@ -638,8 +636,7 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
 		return -ENODEV;
 	}
 
-	bip = bio_integrity_alloc(bio, GFP_NOIO,
-			min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
+	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
 	if (IS_ERR(bip)) {
 		pr_err("Unable to allocate bio_integrity_payload\n");
 		return PTR_ERR(bip);

drivers/target/target_core_pscsi.c
@@ -881,7 +881,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
 		if (!bio) {
new_bio:
-			nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+			nr_vecs = bio_max_segs(nr_pages);
 			nr_pages -= nr_vecs;
 			/*
			 * Calls bio_kmalloc() and sets bio->bi_end_io()
fs/block_dev.c
@@ -221,7 +221,7 @@ static void blkdev_bio_end_io_simple(struct bio *bio)
 
 static ssize_t
 __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
-		int nr_pages)
+		unsigned int nr_pages)
 {
 	struct file *file = iocb->ki_filp;
 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
@@ -355,8 +355,8 @@ static void blkdev_bio_end_io(struct bio *bio)
 	}
 }
 
-static ssize_t
-__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
+static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+		unsigned int nr_pages)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = bdev_file_inode(file);
@@ -486,7 +486,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 static ssize_t
 blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
-	int nr_pages;
+	unsigned int nr_pages;
 
 	if (!iov_iter_count(iter))
 		return 0;
@@ -495,7 +495,7 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
 		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
 
-	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
+	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
 }
 
 static __init int blkdev_init(void)
fs/direct-io.c
@@ -695,7 +695,7 @@ static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
 	if (ret)
 		goto out;
 	sector = start_sector << (sdio->blkbits - 9);
-	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
+	nr_pages = bio_max_segs(sdio->pages_in_io);
 	BUG_ON(nr_pages <= 0);
 	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
 	sdio->boundary = 0;
fs/erofs/data.c
@@ -215,10 +215,8 @@ submit_bio_retry:
 		/* max # of continuous pages */
 		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
 			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
-		if (nblocks > BIO_MAX_PAGES)
-			nblocks = BIO_MAX_PAGES;
 
-		bio = bio_alloc(GFP_NOIO, nblocks);
+		bio = bio_alloc(GFP_NOIO, bio_max_segs(nblocks));
 
 		bio->bi_end_io = erofs_readendio;
 		bio_set_dev(bio, sb->s_bdev);
fs/ext4/readpage.c
@@ -371,8 +371,7 @@ int ext4_mpage_readpages(struct inode *inode,
 		 * bio_alloc will _always_ be able to allocate a bio if
 		 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
 		 */
-		bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, BIO_MAX_PAGES));
+		bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
 		fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
 					  GFP_KERNEL);
 		ext4_set_bio_post_read_ctx(bio, inode, page->index);
fs/f2fs/data.c
@@ -969,8 +969,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
 	unsigned int post_read_steps = 0;
 
 	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
-			       min_t(int, nr_pages, BIO_MAX_PAGES),
-			       &f2fs_bioset);
+			       bio_max_segs(nr_pages), &f2fs_bioset);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
 

fs/f2fs/node.c
@@ -2747,7 +2747,7 @@ int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
 	sum_entry = &sum->entries[0];
 
 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
-		nrpages = min(last_offset - i, BIO_MAX_PAGES);
+		nrpages = bio_max_segs(last_offset - i);
 
 		/* readahead node pages */
 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
fs/iomap/buffered-io.c
@@ -278,14 +278,14 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	if (!is_contig || bio_full(ctx->bio, plen)) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
-		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
 
 		if (ctx->bio)
 			submit_bio(ctx->bio);
 
 		if (ctx->rac) /* same as readahead_gfp_mask */
 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
+		ctx->bio = bio_alloc(gfp, bio_max_segs(nr_vecs));
 		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads. This emulates
fs/mpage.c
@@ -304,9 +304,7 @@ alloc_new:
 			goto out;
 		}
 		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-					min_t(int, args->nr_pages,
-					      BIO_MAX_PAGES),
-					gfp);
+				bio_max_segs(args->nr_pages), gfp);
 		if (args->bio == NULL)
 			goto confused;
 	}
fs/nfs/blocklayout/blocklayout.c
@@ -115,13 +115,13 @@ bl_submit_bio(struct bio *bio)
 	return NULL;
 }
 
-static struct bio *
-bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
+static struct bio *bl_alloc_init_bio(unsigned int npg,
+		struct block_device *bdev, sector_t disk_sector,
 		bio_end_io_t end_io, struct parallel_io *par)
 {
 	struct bio *bio;
 
-	npg = min(npg, BIO_MAX_PAGES);
+	npg = bio_max_segs(npg);
 	bio = bio_alloc(GFP_NOIO, npg);
 	if (bio) {
 		bio->bi_iter.bi_sector = disk_sector;
fs/xfs/xfs_bio_io.c
@@ -6,7 +6,7 @@
 
 static inline unsigned int bio_max_vecs(unsigned int count)
 {
-	return min_t(unsigned, howmany(count, PAGE_SIZE), BIO_MAX_PAGES);
+	return bio_max_segs(howmany(count, PAGE_SIZE));
 }
 
 int

fs/xfs/xfs_buf.c
@@ -1480,7 +1480,7 @@ xfs_buf_ioapply_map(
 	int	op)
 {
 	int		page_index;
-	int		total_nr_pages = bp->b_page_count;
+	unsigned int	total_nr_pages = bp->b_page_count;
 	int		nr_pages;
 	struct bio	*bio;
 	sector_t	sector = bp->b_maps[map].bm_bn;
@@ -1505,7 +1505,7 @@ xfs_buf_ioapply_map(
 
next_chunk:
 	atomic_inc(&bp->b_io_remaining);
-	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
+	nr_pages = bio_max_segs(total_nr_pages);
 
 	bio = bio_alloc(GFP_NOIO, nr_pages);
 	bio_set_dev(bio, bp->b_target->bt_bdev);
include/linux/bio.h
@@ -20,7 +20,12 @@
 #define BIO_BUG_ON
 #endif
 
-#define BIO_MAX_PAGES		256
+#define BIO_MAX_PAGES		256U
+
+static inline unsigned int bio_max_segs(unsigned int nr_segs)
+{
+	return min(nr_segs, BIO_MAX_PAGES);
+}
 
 #define bio_prio(bio)			(bio)->bi_ioprio
 #define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)
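Note the constant becoming 256U: the kernel's min() macro rejects operands of different types at compile time, and bio_max_segs() compares an unsigned int against it, so the literal must be unsigned too. A userspace illustration with a type-checked min in the kernel's style (GCC statement-expression; the pointer comparison only exists to make mismatched types warn):

#include <stdio.h>

#define BIO_MAX_PAGES 256U

#define min(x, y) ({				\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	(void)(&_x == &_y); /* warns if x and y have different types */ \
	_x < _y ? _x : _y; })

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_PAGES); /* both unsigned int: clean */
}

int main(void)
{
	printf("%u\n", bio_max_segs(1000)); /* 256 */
	return 0;
}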
include/linux/blkdev.h
@@ -462,7 +462,6 @@ struct request_queue {
 #ifdef CONFIG_PM
 	struct device		*dev;
 	enum rpm_status		rpm_status;
-	unsigned int		nr_pending;
 #endif
 
 	/*
include/linux/blktrace_api.h
@@ -23,8 +23,6 @@ struct blk_trace {
 	u32 pid;
 	u32 dev;
 	struct dentry *dir;
-	struct dentry *dropped_file;
-	struct dentry *msg_file;
 	struct list_head running_list;
 	atomic_t dropped;
 };
@@ -119,7 +117,7 @@ struct compat_blk_user_trace_setup {
 
 #endif
 
-extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
+void blk_fill_rwbs(char *rwbs, unsigned int op);
 
 static inline sector_t blk_rq_trace_sector(struct request *rq)
 {
include/trace/events/bcache.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__entry->dev		= bio_dev(bio);
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 	),
 
 	TP_printk("%d,%d %s %llu + %u",
@@ -137,7 +137,7 @@ TRACE_EVENT(bcache_read,
 		__entry->dev		= bio_dev(bio);
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
@@ -168,7 +168,7 @@ TRACE_EVENT(bcache_write,
 		__entry->inode		= inode;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
@@ -238,7 +238,7 @@ TRACE_EVENT(bcache_journal_write,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
 		__entry->nr_keys	= keys;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 	),
 
 	TP_printk("%d,%d %s %llu + %u keys %u",
include/trace/events/block.h
@@ -89,7 +89,7 @@ TRACE_EVENT(block_rq_requeue,
 		__entry->sector    = blk_rq_trace_sector(rq);
 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
 
-		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
 		__get_str(cmd)[0] = '\0';
 	),
 
@@ -133,7 +133,7 @@ TRACE_EVENT(block_rq_complete,
 		__entry->nr_sector = nr_bytes >> 9;
 		__entry->error     = error;
 
-		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
 		__get_str(cmd)[0] = '\0';
 	),
 
@@ -166,7 +166,7 @@ DECLARE_EVENT_CLASS(block_rq,
 		__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
 		__entry->bytes     = blk_rq_bytes(rq);
 
-		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
 		__get_str(cmd)[0] = '\0';
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -196,7 +196,7 @@ DEFINE_EVENT(block_rq, block_rq_insert,
 
 /**
  * block_rq_issue - issue pending block IO request operation to device driver
- * @rq: block IO operation operation request
+ * @rq: block IO operation request
  *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
@@ -210,7 +210,7 @@ DEFINE_EVENT(block_rq, block_rq_issue,
 
 /**
  * block_rq_merge - merge request with another one in the elevator
- * @rq: block IO operation operation request
+ * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is merged to another
 * request queued in the elevator.
@@ -249,7 +249,7 @@ TRACE_EVENT(block_bio_complete,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= blk_status_to_errno(bio->bi_status);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 	),
 
 	TP_printk("%d,%d %s %llu + %u [%d]",
@@ -276,7 +276,7 @@ DECLARE_EVENT_CLASS(block_bio,
 		__entry->dev		= bio_dev(bio);
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -433,7 +433,7 @@ TRACE_EVENT(block_split,
 		__entry->dev		= bio_dev(bio);
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -474,7 +474,7 @@ TRACE_EVENT(block_bio_remap,
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -518,7 +518,7 @@ TRACE_EVENT(block_rq_remap,
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
 		__entry->nr_bios	= blk_rq_count_bios(rq);
-		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
kernel/trace/blktrace.c
@@ -312,8 +312,6 @@ record_it:
 
 static void blk_trace_free(struct blk_trace *bt)
 {
-	debugfs_remove(bt->msg_file);
-	debugfs_remove(bt->dropped_file);
 	relay_close(bt->rchan);
 	debugfs_remove(bt->dir);
 	free_percpu(bt->sequence);
@@ -545,10 +543,8 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
-	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
-					       &blk_dropped_fops);
-
-	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
+	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 
 	bt->rchan = relay_open("trace", dir, buts->buf_size,
 				buts->buf_nr, &blk_relay_callbacks, bt);
@@ -1868,7 +1864,17 @@ void blk_trace_remove_sysfs(struct device *dev)
 
 #ifdef CONFIG_EVENT_TRACING
 
-void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
+/**
+ * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
+ * @rwbs:	buffer to be filled
+ * @op:		REQ_OP_XXX for the tracepoint
+ *
+ * Description:
+ *     Maps the REQ_OP_XXX to character and fills the buffer provided by the
+ *     caller with resulting string.
+ *
+ **/
+void blk_fill_rwbs(char *rwbs, unsigned int op)
 {
 	int i = 0;
 
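For context, the rwbs string is the short op/flag code seen in blktrace/blkparse output (for example "WS" for a synchronous write). A rough userspace re-creation of the mapping idea; this approximates the kernel logic with made-up flag bits for the demo, it is not a copy of blk_fill_rwbs():

#include <stdio.h>

#define OP_WRITE 1          /* assumed encodings for the demo only */
#define F_SYNC   (1 << 8)
#define F_META   (1 << 9)

static void fill_rwbs(char *rwbs, unsigned int op)
{
	int i = 0;

	rwbs[i++] = (op & 1) == OP_WRITE ? 'W' : 'R';
	if (op & F_SYNC)
		rwbs[i++] = 'S';
	if (op & F_META)
		rwbs[i++] = 'M';
	rwbs[i] = '\0';
}

int main(void)
{
	char rwbs[8];

	fill_rwbs(rwbs, OP_WRITE | F_SYNC);
	printf("%s\n", rwbs); /* "WS", as it would appear in a trace line */
	return 0;
}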