Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: mm (kasan, pagealloc, rmap,
  hmm, and hugetlb), and hfs"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/hugetlb: fix refs calculation from unaligned @vaddr
  hfs: add lock nesting notation to hfs_find_init
  hfs: fix high memory mapping in hfs_bnode_read
  hfs: add missing clean-up in hfs_fill_super
  lib/test_hmm: remove set but unused page variable
  mm: fix the try_to_unmap prototype for !CONFIG_MMU
  mm/page_alloc: further fix __alloc_pages_bulk() return value
  mm/page_alloc: correct return value when failing at preparing
  mm/page_alloc: avoid page allocator recursion with pagesets.lock held
  Revert "mm/page_alloc: make should_fail_alloc_page() static"
  kasan: fix build by including kernel.h
  kasan: add memzero init for unaligned size at DEBUG
  mm: move helper to check slub_debug_enabled
commit dd9c7df94c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
-
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
+
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
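The new hfs_bnode_read walks the node page by page instead of assuming the whole range sits in the first page. A minimal userspace sketch of the same chunking arithmetic, with PAGE_SIZE simulated as 4096 and hypothetical names (not HFS code):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for the kernel loop: copy `len` bytes starting at
 * byte offset `off` out of an array of fixed-size pages, taking at most
 * one page's worth per iteration. */
static void read_across_pages(char **pages, int npages, void *buf, int off, int len)
{
        int pagenum = off / PAGE_SIZE;
        int bytes_read = 0;

        off %= PAGE_SIZE;               /* offset applies to the first page only */
        while (bytes_read < len && pagenum < npages) {
                int chunk = len - bytes_read;

                if (chunk > PAGE_SIZE - off)
                        chunk = PAGE_SIZE - off;   /* clamp to what's left in this page */
                memcpy((char *)buf + bytes_read, pages[pagenum] + off, chunk);
                bytes_read += chunk;
                pagenum++;
                off = 0;
        }
}

int main(void)
{
        static char p0[PAGE_SIZE], p1[PAGE_SIZE];
        char *pages[] = { p0, p1 };
        char out[16];

        memset(p0, 'A', PAGE_SIZE);
        memset(p1, 'B', PAGE_SIZE);
        /* A read that starts 8 bytes before a page boundary spans both pages. */
        read_across_pages(pages, 2, out, PAGE_SIZE - 8, sizeof(out) - 1);
        out[15] = '\0';
        printf("%s\n", out);            /* prints AAAAAAAABBBBBBB */
        return 0;
}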
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE 256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
@@ -420,14 +420,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -443,6 +441,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
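The new bail_hfs_find label makes sure the btree find data is released on every error path between hfs_find_init and hfs_find_exit. A small userspace sketch of the same layered-goto cleanup idiom; the resource names are illustrative, not HFS code:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: acquire a resource, then unwind it by jumping to the
 * matching label on any later failure, mirroring the bail_hfs_find pattern. */
static int setup(int fail_step)
{
        char *find_data, *root = NULL;
        int res = 0;

        find_data = malloc(32);         /* like hfs_find_init() */
        if (!find_data)
                return -1;

        if (fail_step == 1) {           /* error while find_data is held */
                res = -1;
                goto bail_find;
        }

        root = malloc(32);              /* like looking up the root inode */
        if (!root) {
                res = -1;
                goto bail_find;
        }

        printf("setup ok\n");
        free(root);
        free(find_data);
        return 0;

bail_find:
        free(find_data);                /* runs on every failure after acquisition */
        printf("cleaned up after failure\n");
        return res;
}

int main(void)
{
        setup(1);
        setup(0);
        return 0;
}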
@@ -3,6 +3,7 @@
 #define _LINUX_KASAN_H
 
 #include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/static_key.h>
 #include <linux/types.h>
 
@@ -291,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked,
 	return 0;
 }
 
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+{
+}
 
 static inline int page_mkclean(struct page *page)
 {
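The !CONFIG_MMU stub changes from a value-returning macro to a static inline with the same prototype as the real function, so call sites are argument-checked either way. A userspace sketch of that stub pattern; the names are hypothetical stand-ins, not the rmap API:

#include <stdio.h>

struct page { int id; };
enum ttu_flags { TTU_SYNC = 1 };

/* When a feature is compiled out, keep a no-op stub with the exact
 * prototype of the real function. Unlike "#define try_to_unmap(p, f) false",
 * the compiler still checks argument count and types at every call site. */
static inline void try_to_unmap_stub(struct page *page, enum ttu_flags flags)
{
        (void)page;
        (void)flags;
}

int main(void)
{
        struct page p = { 42 };

        try_to_unmap_stub(&p, TTU_SYNC);        /* type-checked, does nothing */
        printf("stub called for page %d\n", p.id);
        return 0;
}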
@@ -628,10 +628,8 @@ static int dmirror_check_atomic(struct dmirror *dmirror, unsigned long start,
 
 	for (pfn = start >> PAGE_SHIFT; pfn < (end >> PAGE_SHIFT); pfn++) {
 		void *entry;
-		struct page *page;
 
 		entry = xa_load(&dmirror->pt, pfn);
-		page = xa_untag_pointer(entry);
 		if (xa_pointer_tag(entry) == DPT_XA_TAG_ATOMIC)
 			return -EPERM;
 	}
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 		}
 
-		refs = min3(pages_per_huge_page(h) - pfn_offset,
-			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+		/* vaddr may not be aligned to PAGE_SIZE */
+		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
 		if (pages || vmas)
 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
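The old expression rounds the span down when vaddr is not page-aligned and so misses the page containing vaddr itself. A standalone check of the arithmetic with made-up example addresses (not values from the commit):

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
        /* Hypothetical values: an unaligned address near the end of a VMA. */
        unsigned long vaddr  = 0x1000200;      /* not PAGE_SIZE aligned */
        unsigned long vm_end = 0x1003000;

        unsigned long old_refs = (vm_end - vaddr) >> PAGE_SHIFT;
        unsigned long new_refs = (vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT;

        /* Pages actually covered: 0x1000000, 0x1001000, 0x1002000 -> 3. */
        printf("old formula: %lu pages, fixed formula: %lu pages\n",
               old_refs, new_refs);            /* prints 2 vs 3 */
        return 0;
}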
@@ -9,6 +9,7 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 
 #include <linux/static_key.h>
+#include "../slab.h"
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
+	/*
+	 * Explicitly initialize the memory with the precise object size to
+	 * avoid overwriting the SLAB redzone. This disables initialization in
+	 * the arch code and may thus lead to performance penalty. The penalty
+	 * is accepted since SLAB redzones aren't enabled in production builds.
+	 */
+	if (__slub_debug_enabled() &&
+	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+		init = false;
+		memzero_explicit((void *)addr, size);
+	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
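kasan_unpoison tags, and optionally initializes, whole granules, so an unaligned object size gets rounded up past the object and into the slub redzone. A standalone illustration of that rounding with made-up sizes; the granule size is assumed to be 16, as with arm64 MTE:

#include <stdio.h>

#define KASAN_GRANULE_SIZE 16UL                  /* assumed: arm64 MTE granule */
#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* Hypothetical object size that is not a multiple of the granule. */
        unsigned long size = 24;
        unsigned long rounded = round_up(size, KASAN_GRANULE_SIZE);

        /* Tag-based init would cover `rounded` bytes; the trailing
         * (rounded - size) bytes belong to the slub redzone, which is why the
         * fix zeroes only `size` bytes explicitly and disables init for the
         * tagging step. */
        printf("object %lu bytes -> %lu tagged bytes (%lu redzone bytes)\n",
               size, rounded, rounded - size);   /* 24 -> 32 (8) */
        return 0;
}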
@@ -3820,7 +3820,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 
 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return __should_fail_alloc_page(gfp_mask, order);
 }
@@ -5221,9 +5221,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	int nr_populated = 0, nr_account = 0;
 
-	if (unlikely(nr_pages <= 0))
-		return 0;
-
 	/*
 	 * Skip populated array elements to determine if any pages need
 	 * to be allocated before disabling IRQs.
@@ -5231,19 +5228,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
 		nr_populated++;
 
+	/* No pages requested? */
+	if (unlikely(nr_pages <= 0))
+		goto out;
+
 	/* Already populated array? */
 	if (unlikely(page_array && nr_pages - nr_populated == 0))
-		return nr_populated;
+		goto out;
 
 	/* Use the single page allocator for one page. */
 	if (nr_pages - nr_populated == 1)
 		goto failed;
 
+#ifdef CONFIG_PAGE_OWNER
+	/*
+	 * PAGE_OWNER may recurse into the allocator to allocate space to
+	 * save the stack with pagesets.lock held. Releasing/reacquiring
+	 * removes much of the performance benefit of bulk allocation so
+	 * force the caller to allocate one page at a time as it'll have
+	 * similar performance to added complexity to the bulk allocator.
+	 */
+	if (static_branch_unlikely(&page_owner_inited))
+		goto failed;
+#endif
+
 	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
 	gfp &= gfp_allowed_mask;
 	alloc_gfp = gfp;
 	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
-		return 0;
+		goto out;
 	gfp = alloc_gfp;
 
 	/* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5324,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
 	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
 
+out:
 	return nr_populated;
 
 failed_irq:
@@ -5326,7 +5340,7 @@ failed:
 		nr_populated++;
 	}
 
-	return nr_populated;
+	goto out;
 }
 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
 
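Taken together, the page_alloc changes make every exit path of __alloc_pages_bulk report the number of populated array entries, including entries that were already populated on entry. A userspace sketch of that return-value contract (a simplified stand-in, not the kernel allocator; malloc plays the role of a page allocation):

#include <stdio.h>
#include <stdlib.h>

/* Fill empty slots of `pages` up to nr_pages and return how many slots are
 * populated on exit: never just the count of newly allocated entries, and
 * never 0 when the caller had already filled some slots. */
static int bulk_fill(void **pages, int nr_pages)
{
        int nr_populated = 0;

        /* Skip slots the caller already populated. */
        while (nr_populated < nr_pages && pages[nr_populated])
                nr_populated++;

        /* A zero-sized or fully populated request still reports what's there. */
        if (nr_pages <= 0 || nr_populated == nr_pages)
                return nr_populated;

        while (nr_populated < nr_pages) {
                void *p = malloc(64);
                if (!p)
                        break;          /* partial success: report what we have */
                pages[nr_populated++] = p;
        }
        return nr_populated;
}

int main(void)
{
        void *pages[4] = { 0 };

        pages[0] = malloc(64);          /* caller pre-populated one slot */
        printf("populated: %d of 4\n", bulk_fill(pages, 4));
        for (int i = 0; i < 4; i++)
                free(pages[i]);
        return 0;
}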
 mm/slab.h | 15
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 extern void print_tracking(struct kmem_cache *s, void *object);
 long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+	return static_branch_unlikely(&slub_debug_enabled);
+}
 #else
 static inline void print_tracking(struct kmem_cache *s, void *object)
 {
 }
+static inline bool __slub_debug_enabled(void)
+{
+	return false;
+}
 #endif
 
 /*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
  */
 static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
 {
-#ifdef CONFIG_SLUB_DEBUG
-	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
-	if (static_branch_unlikely(&slub_debug_enabled))
+	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+	if (__slub_debug_enabled())
 		return s->flags & flags;
-#endif
 	return false;
 }
 
 mm/slub.c | 14
@@ -120,25 +120,11 @@
  */
 
 #ifdef CONFIG_SLUB_DEBUG
-
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
 #else
 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
-
-static inline bool __slub_debug_enabled(void)
-{
-	return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else /* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_SLUB_DEBUG */
 
 static inline bool kmem_cache_debug(struct kmem_cache *s)