0ef7dcac99
Patch series "make hugetlb put_page safe for all calling contexts", v5. This effort is the result a recent bug report [1]. Syzbot found a potential deadlock in the hugetlb put_page/free_huge_page_path. WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected Since the free_huge_page_path already has code to 'hand off' page free requests to a workqueue, a suggestion was proposed to make the in_irq() detection accurate by always enabling PREEMPT_COUNT [2]. The outcome of that discussion was that the hugetlb put_page path (free_huge_page) path should be properly fixed and safe for all calling contexts. [1] https://lore.kernel.org/linux-mm/000000000000f1c03b05bc43aadc@google.com/ [2] http://lkml.kernel.org/r/20210311021321.127500-1-mike.kravetz@oracle.com This patch (of 8): cma_release is currently a sleepable operatation because the bitmap manipulation is protected by cma->lock mutex. Hugetlb code which relies on cma_release for CMA backed (giga) hugetlb pages, however, needs to be irq safe. The lock doesn't protect any sleepable operation so it can be changed to a (irq aware) spin lock. The bitmap processing should be quite fast in typical case but if cma sizes grow to TB then we will likely need to replace the lock by a more optimized bitmap implementation. Link: https://lkml.kernel.org/r/20210409205254.242291-1-mike.kravetz@oracle.com Link: https://lkml.kernel.org/r/20210409205254.242291-2-mike.kravetz@oracle.com Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com> Acked-by: Michal Hocko <mhocko@suse.com> Reviewed-by: David Hildenbrand <david@redhat.com> Acked-by: Roman Gushchin <guro@fb.com> Cc: Shakeel Butt <shakeelb@google.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Muchun Song <songmuchun@bytedance.com> Cc: David Rientjes <rientjes@google.com> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Matthew Wilcox <willy@infradead.org> Cc: HORIGUCHI NAOYA <naoya.horiguchi@nec.com> Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com> Cc: Waiman Long <longman@redhat.com> Cc: Peter Xu <peterx@redhat.com> Cc: Mina Almasry <almasrymina@google.com> Cc: Hillf Danton <hdanton@sina.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Barry Song <song.bao.hua@hisilicon.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
201 lines
4.5 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */


#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	spin_lock_irq(&cma->lock);
	/* pages counter is smaller than sizeof(int) */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	spin_unlock_irq(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	spin_lock_irq(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	spin_unlock_irq(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;

}

static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0, false);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
	struct dentry *tmp;
	char name[16];

	scnprintf(name, sizeof(name), "cma-%s", cma->name);

	tmp = debugfs_create_dir(name, root_dentry);

	debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
	debugfs_create_file("base_pfn", 0444, tmp,
			    &cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", 0444, tmp,
			    &cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);

	cma->dfs_bitmap.array = (u32 *)cma->bitmap;
	cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma),
						  BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap);
}

static int __init cma_debugfs_init(void)
{
	struct dentry *cma_debugfs_root;
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root);

	return 0;
}
late_initcall(cma_debugfs_init);
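For reference, the files created by cma_debugfs_add_one() can be exercised from user space once debugfs is available. The sketch below is illustrative only: it assumes debugfs is mounted at /sys/kernel/debug and that a CMA area named "reserved" exists; the per-area directory name follows the "cma-%s" format above, but the actual area name (and the pages you may safely allocate) is platform dependent. The write_u64() helper is a hypothetical convenience, not part of any kernel or libc API.

	/* Hypothetical user-space sketch: allocate 4 pages through "alloc",
	 * read back "used", then release them through "free". Run as root.
	 */
	#include <stdio.h>

	#define CMA_DIR "/sys/kernel/debug/cma/cma-reserved"

	static int write_u64(const char *path, unsigned long long val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%llu\n", val);
		return fclose(f);
	}

	int main(void)
	{
		char buf[64];
		FILE *f;

		/* "alloc" is write-only (0200): each write allocates that many pages */
		if (write_u64(CMA_DIR "/alloc", 4))
			perror("alloc");

		/* "used" is read-only (0444): pages currently set in the bitmap */
		f = fopen(CMA_DIR "/used", "r");
		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("used pages: %s", buf);
			fclose(f);
		}

		/* "free" releases pages previously allocated via this interface */
		if (write_u64(CMA_DIR "/free", 4))
			perror("free");

		return 0;
	}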