block: add BIO_NO_PAGE_REF flag
If bio_iov_iter_get_pages() is called on an iov_iter that is flagged with
NO_REF, then we don't need to add a page reference for the pages that we
add.

Add BIO_NO_PAGE_REF to track this in the bio, so IO completion knows not
to drop a reference to these pages.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 399254aaf4
parent 875f1d0769
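Note: as a submit-side illustration of when this flag is meant to fire, here is a minimal sketch. It assumes the iov_iter side from parent commit 875f1d0769, i.e. an ITER_BVEC_FLAG_NO_REF type bit that iov_iter_bvec_no_ref() tests; submit_preregistered_bvec() is a hypothetical caller, not part of this patch, standing in for a user (such as io_uring's fixed buffers) that already holds long-term references on its pages.

#include <linux/bio.h>
#include <linux/uio.h>

/*
 * Hypothetical submitter: the pages behind @bvecs are pinned long-term by
 * their owner, so per-bio page references are unnecessary. Marking the
 * iter "no ref" makes bio_iov_iter_get_pages() set BIO_NO_PAGE_REF
 * instead of taking a get_page() per page.
 */
static int submit_preregistered_bvec(struct bio *bio, struct bio_vec *bvecs,
                                     unsigned int nr_bvecs, size_t bytes)
{
        struct iov_iter iter;

        /* kernel pages: this builds an ITER_BVEC iterator */
        iov_iter_bvec(&iter, WRITE, bvecs, nr_bvecs, bytes);

        /*
         * Assumption: ITER_BVEC_FLAG_NO_REF comes from the parent iov_iter
         * commit (875f1d0769) and is what iov_iter_bvec_no_ref() checks.
         */
        iter.type |= ITER_BVEC_FLAG_NO_REF;

        /* with the flag set, no extra page references are taken here */
        return bio_iov_iter_get_pages(bio, &iter);
}

With the flag observed by bio_iov_iter_get_pages(), the bio is marked BIO_NO_PAGE_REF and the completion paths changed below skip the per-page put.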
block/bio.c (43 changed lines)
@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
 	size = bio_add_page(bio, bv->bv_page, len,
 				bv->bv_offset + iter->iov_offset);
 	if (size == len) {
-		struct page *page;
-		int i;
+		if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+			struct page *page;
+			int i;
+
+			mp_bvec_for_each_page(page, bv, i)
+				get_page(page);
+		}
 
-		/*
-		 * For the normal O_DIRECT case, we could skip grabbing this
-		 * reference and then not have to put them again when IO
-		 * completes. But this breaks some in-kernel users, like
-		 * splicing to/from a loop device, where we release the pipe
-		 * pages unconditionally. If we can fix that case, we can
-		 * get rid of the get here and the need to call
-		 * bio_release_pages() at IO completion time.
-		 */
-		mp_bvec_for_each_page(page, bv, i)
-			get_page(page);
 		iov_iter_advance(iter, size);
 		return 0;
 	}
@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  * This takes either an iterator pointing to user memory, or one pointing to
  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
  * map them into the kernel. On IO completion, the caller should put those
- * pages. For now, when adding kernel pages, we still grab a reference to the
- * page. This isn't strictly needed for the common case, but some call paths
- * end up releasing pages from eg a pipe and we can't easily control these.
- * See comment in __bio_iov_bvec_add_pages().
+ * pages. If we're adding kernel pages, and the caller told us it's safe to
+ * do so, we just have to add the pages to the bio directly. We don't grab an
+ * extra reference to those pages (the user should already have that), and we
+ * don't put the page on IO completion. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
+ * released.
  *
  * The function tries, but does not guarantee, to pin as many pages as
  * fit into the bio, or are requested in *iter, whatever is smaller. If
@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	const bool is_bvec = iov_iter_is_bvec(iter);
 	unsigned short orig_vcnt = bio->bi_vcnt;
 
+	/*
+	 * If this is a BVEC iter, then the pages are kernel pages. Don't
+	 * release them on IO completion, if the caller asked us to.
+	 */
+	if (is_bvec && iov_iter_bvec_no_ref(iter))
+		bio_set_flag(bio, BIO_NO_PAGE_REF);
+
 	do {
 		int ret;
 
@@ -1696,7 +1699,8 @@ static void bio_dirty_fn(struct work_struct *work)
 		next = bio->bi_private;
 
 		bio_set_pages_dirty(bio);
-		bio_release_pages(bio);
+		if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+			bio_release_pages(bio);
 		bio_put(bio);
 	}
 }
@@ -1713,7 +1717,8 @@ void bio_check_pages_dirty(struct bio *bio)
 			goto defer;
 	}
 
-	bio_release_pages(bio);
+	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+		bio_release_pages(bio);
 	bio_put(bio);
 	return;
 defer:
fs/block_dev.c
@@ -336,12 +336,14 @@ static void blkdev_bio_end_io(struct bio *bio)
 		if (should_dirty) {
 			bio_check_pages_dirty(bio);
 		} else {
-			struct bio_vec *bvec;
-			int i;
-			struct bvec_iter_all iter_all;
+			if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+				struct bvec_iter_all iter_all;
+				struct bio_vec *bvec;
+				int i;
 
-			bio_for_each_segment_all(bvec, bio, i, iter_all)
-				put_page(bvec->bv_page);
+				bio_for_each_segment_all(bvec, bio, i, iter_all)
+					put_page(bvec->bv_page);
+			}
 			bio_put(bio);
 		}
 	}
fs/iomap.c (12 changed lines)
@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio)
 		if (should_dirty) {
 			bio_check_pages_dirty(bio);
 		} else {
-			struct bio_vec *bvec;
-			int i;
-			struct bvec_iter_all iter_all;
+			if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
+				struct bvec_iter_all iter_all;
+				struct bio_vec *bvec;
+				int i;
 
-			bio_for_each_segment_all(bvec, bio, i, iter_all)
-				put_page(bvec->bv_page);
+				bio_for_each_segment_all(bvec, bio, i, iter_all)
+					put_page(bvec->bv_page);
+			}
 			bio_put(bio);
 		}
 	}
include/linux/blk_types.h
@@ -215,6 +215,7 @@ struct bio {
 /*
  * bio flags
  */
+#define BIO_NO_PAGE_REF	0	/* don't put release vec pages */
 #define BIO_SEG_VALID	1	/* bi_phys_segments valid */
 #define BIO_CLONED	2	/* doesn't own data */
 #define BIO_BOUNCED	3	/* bio is a bounce bio */
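Note: the comment added to __bio_iov_iter_get_pages() above puts the burden on completion handlers. Below is a minimal sketch of that contract for any similar dio end_io path, mirroring the blkdev_bio_end_io() and iomap_dio_bio_end_io() hunks; bio_end_io_example() is a hypothetical handler name, not part of this patch.

#include <linux/bio.h>
#include <linux/bvec.h>

/*
 * Hypothetical completion handler: only drop page references if the bio
 * was not flagged BIO_NO_PAGE_REF at submit time.
 */
static void bio_end_io_example(struct bio *bio)
{
        struct bvec_iter_all iter_all;
        struct bio_vec *bvec;
        int i;

        if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
                /* pages were pinned at submit time, drop them now */
                bio_for_each_segment_all(bvec, bio, i, iter_all)
                        put_page(bvec->bv_page);
        }
        bio_put(bio);
}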