block: allow using the per-cpu bio cache from bio_alloc_bioset
Replace the BIO_PERCPU_CACHE bio-internal flag with a REQ_ALLOC_CACHE one
that can be passed to bio_alloc / bio_alloc_bioset, and implement the percpu
cache allocation logic in a helper called from bio_alloc_bioset.  This allows
any bio_alloc_bioset user to use the percpu caches instead of having the
functionality tied to struct kiocb.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
[hch: refactored a bit]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220324203526.62306-2-snitzer@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
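For context, a minimal sketch of how a bio_alloc_bioset() caller can opt into the per-cpu cache after this change. The bio_set name my_bio_set, the setup function, and the read helper are illustrative assumptions, not part of the patch; the bio_set still has to be created with BIOSET_PERCPU_CACHE for a cache to exist at all.

#include <linux/bio.h>
#include <linux/blk_types.h>

/* hypothetical caller-owned bio_set; not part of this commit */
static struct bio_set my_bio_set;

static int my_setup(void)
{
	/* the per-cpu free lists only exist for BIOSET_PERCPU_CACHE bio_sets */
	return bioset_init(&my_bio_set, 128, 0,
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}

static struct bio *my_alloc_read_bio(struct block_device *bdev)
{
	/*
	 * REQ_ALLOC_CACHE asks bio_alloc_bioset() to try the per-cpu cache
	 * first; if the cache is empty the bio comes from the mempool but
	 * keeps the flag, and if the bio_set has no cache (or nr_vecs is
	 * larger than BIO_INLINE_VECS) the flag is simply cleared.
	 */
	return bio_alloc_bioset(bdev, 1, REQ_OP_READ | REQ_ALLOC_CACHE,
				GFP_KERNEL, &my_bio_set);
}

As the updated kernel-doc in the diff notes, the final bio_put() of a REQ_ALLOC_CACHE bio must come from process context, since it may return the bio to the per-cpu free list.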
parent b2d229d4dd
commit 0df71650c0
block/bio.c (86 changed lines)

@@ -419,6 +419,28 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 }
 
+static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
+		unsigned short nr_vecs, unsigned int opf, gfp_t gfp,
+		struct bio_set *bs)
+{
+	struct bio_alloc_cache *cache;
+	struct bio *bio;
+
+	cache = per_cpu_ptr(bs->cache, get_cpu());
+	if (!cache->free_list) {
+		put_cpu();
+		return NULL;
+	}
+	bio = cache->free_list;
+	cache->free_list = bio->bi_next;
+	cache->nr--;
+	put_cpu();
+
+	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
+	bio->bi_pool = bs;
+	return bio;
+}
+
 /**
  * bio_alloc_bioset - allocate a bio for I/O
  * @bdev:	block device to allocate the bio for (can be %NULL)
@@ -451,6 +473,9 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
  * for per bio allocations.
  *
+ * If REQ_ALLOC_CACHE is set, the final put of the bio MUST be done from process
+ * context, not hard/soft IRQ.
+ *
  * Returns: Pointer to new bio on success, NULL on failure.
  */
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
@@ -465,6 +490,21 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 		return NULL;
 
+	if (opf & REQ_ALLOC_CACHE) {
+		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
+			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
+						     gfp_mask, bs);
+			if (bio)
+				return bio;
+			/*
+			 * No cached bio available, bio returned below marked with
+			 * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache.
+			 */
+		} else {
+			opf &= ~REQ_ALLOC_CACHE;
+		}
+	}
+
 	/*
 	 * submit_bio_noacct() converts recursion to iteration; this means if
 	 * we're running beneath it, any bios we allocate and submit will not be
@@ -711,7 +751,7 @@ void bio_put(struct bio *bio)
 			return;
 	}
 
-	if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
+	if (bio->bi_opf & REQ_ALLOC_CACHE) {
 		struct bio_alloc_cache *cache;
 
 		bio_uninit(bio);
@@ -1732,50 +1772,6 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
 }
 EXPORT_SYMBOL(bioset_init_from_src);
 
-/**
- * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
- * @kiocb: kiocb describing the IO
- * @bdev: block device to allocate the bio for (can be %NULL)
- * @nr_vecs: number of iovecs to pre-allocate
- * @opf: operation and flags for bio
- * @bs: bio_set to allocate from
- *
- * Description:
- *    Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
- *    used to check if we should dip into the per-cpu bio_set allocation
- *    cache. The allocation uses GFP_KERNEL internally. On return, the
- *    bio is marked BIO_PERCPU_CACHEABLE, and the final put of the bio
- *    MUST be done from process context, not hard/soft IRQ.
- *
- */
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
-		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
-{
-	struct bio_alloc_cache *cache;
-	struct bio *bio;
-
-	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
-		return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
-
-	cache = per_cpu_ptr(bs->cache, get_cpu());
-	if (cache->free_list) {
-		bio = cache->free_list;
-		cache->free_list = bio->bi_next;
-		cache->nr--;
-		put_cpu();
-		bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
-			 nr_vecs, opf);
-		bio->bi_pool = bs;
-		bio_set_flag(bio, BIO_PERCPU_CACHE);
-		return bio;
-	}
-	put_cpu();
-	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
-	bio_set_flag(bio, BIO_PERCPU_CACHE);
-	return bio;
-}
-EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
-
 static int __init init_bio(void)
 {
 	int i;
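To make the process-context requirement from the kernel-doc above concrete, here is a small synchronous-read sketch against the hypothetical my_bio_set from the earlier example; the sector, length, and helper name are assumptions, not part of this commit.

static int my_read_page_sync(struct block_device *bdev, struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ | REQ_ALLOC_CACHE,
			       GFP_KERNEL, &my_bio_set);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = 0;	/* assumed: read from the start of the device */
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* sleeps, so we are in process context */
	bio_put(bio);			/* final put in process context may refill the per-cpu cache */
	return ret;
}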
block/blk.h

@@ -453,8 +453,7 @@ extern struct device_attribute dev_attr_events_poll_msecs;
 static inline void bio_clear_polled(struct bio *bio)
 {
 	/* can't support alloc cache if we turn off polling */
-	bio_clear_flag(bio, BIO_PERCPU_CACHE);
-	bio->bi_opf &= ~REQ_POLLED;
+	bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
 }
 
 long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
block/fops.c (11 changed lines)

@@ -197,8 +197,10 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
-
+	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
+		opf |= REQ_ALLOC_CACHE;
+	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
+			       &blkdev_dio_pool);
 	dio = container_of(bio, struct blkdev_dio, bio);
 	atomic_set(&dio->ref, 1);
 	/*
@@ -320,7 +322,10 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	bio = bio_alloc_kiocb(iocb, bdev, nr_pages, opf, &blkdev_dio_pool);
+	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
+		opf |= REQ_ALLOC_CACHE;
+	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
+			       &blkdev_dio_pool);
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->flags = 0;
 	dio->iocb = iocb;
include/linux/bio.h

@@ -405,8 +405,6 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 			     unsigned int opf, gfp_t gfp_mask,
 			     struct bio_set *bs);
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
-		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs);
 struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
 extern void bio_put(struct bio *);
 
include/linux/blk_types.h

@@ -329,7 +329,6 @@ enum {
 	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
 	BIO_REMAPPED,
 	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
-	BIO_PERCPU_CACHE,	/* can participate in per-cpu alloc cache */
 	BIO_FLAG_LAST
 };
 
@@ -414,6 +413,7 @@ enum req_flag_bits {
 	__REQ_NOUNMAP,		/* do not free blocks when zeroing */
 
 	__REQ_POLLED,		/* caller polls for completion using bio_poll */
+	__REQ_ALLOC_CACHE,	/* allocate IO from cache if available */
 
 	/* for driver use */
 	__REQ_DRV,
@@ -439,6 +439,7 @@ enum req_flag_bits {
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
 #define REQ_POLLED		(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE		(1ULL << __REQ_ALLOC_CACHE)
 
 #define REQ_DRV			(1ULL << __REQ_DRV)
 #define REQ_SWAP		(1ULL << __REQ_SWAP)