block: kmemleak: Track the page allocations for struct request

The pages allocated for struct request contain pointers to other slab
allocations (via ops->init_request). Since kmemleak does not track/scan
page allocations, the slab objects will be reported as leaks (false
positives). This patch adds kmemleak callbacks to allow tracking of such
pages.
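
The fix generalizes: whenever slab pointers live only inside a page-level allocation, the page allocation should be registered with kmemleak_alloc() and unregistered with kmemleak_free() before the pages are returned. Below is a minimal, hypothetical sketch of that pattern outside blk-mq; struct my_ctx, my_ctx_init() and my_ctx_exit() are illustrative names, not part of this patch:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_ctx {
	struct page *page;	/* page-backed buffer, not scanned by default */
	void **table;		/* lives inside the buffer, holds slab pointers */
};

static int my_ctx_init(struct my_ctx *ctx, unsigned int order)
{
	void *buf;

	ctx->page = alloc_pages(GFP_KERNEL, order);
	if (!ctx->page)
		return -ENOMEM;
	buf = page_address(ctx->page);

	/*
	 * Register the buffer with kmemleak so its contents are scanned
	 * for pointers; min_count = 1 matches the usage in this patch.
	 */
	kmemleak_alloc(buf, PAGE_SIZE << order, 1, GFP_KERNEL);

	ctx->table = buf;
	/* Referenced only from the scanned buffer: no false positive. */
	ctx->table[0] = kmalloc(64, GFP_KERNEL);
	return 0;
}

static void my_ctx_exit(struct my_ctx *ctx, unsigned int order)
{
	kfree(ctx->table[0]);
	/* Unregister before the backing pages go away. */
	kmemleak_free(page_address(ctx->page));
	__free_pages(ctx->page, order);
}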

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Bart Van Assche <bart.vanassche@sandisk.com>
Tested-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <axboe@fb.com>

@@ -9,6 +9,7 @@
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/kmemleak.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1438,6 +1439,11 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
 	while (!list_empty(&tags->page_list)) {
 		page = list_first_entry(&tags->page_list, struct page, lru);
 		list_del_init(&page->lru);
+		/*
+		 * Remove kmemleak object previously allocated in
+		 * blk_mq_init_rq_map().
+		 */
+		kmemleak_free(page_address(page));
 		__free_pages(page, page->private);
 	}
@@ -1510,6 +1516,11 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		list_add_tail(&page->lru, &tags->page_list);
 		p = page_address(page);
+		/*
+		 * Allow kmemleak to scan these pages as they contain pointers
+		 * to additional allocations like via ops->init_request().
+		 */
+		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
 		entries_per_page = order_to_size(this_order) / rq_size;
 		to_do = min(entries_per_page, set->queue_depth - i);
 		left -= to_do * rq_size;
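
For reference, the hooks used above are declared in <linux/kmemleak.h>:

void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp);
void kmemleak_free(const void *ptr);

With min_count = 1 the buffer is treated as an ordinary tracked object: kmemleak scans it for pointers (which is what suppresses the false positives) and would also report the buffer itself as a leak if no references to it were found. A scan can be triggered and the results read through /sys/kernel/debug/kmemleak.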