Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "Two weeks worth of fixes here"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (41 commits)
  init/main.c: fix initcall_blacklisted on ia64, ppc64 and parisc64
  autofs: don't get stuck in a loop if vfs_write() returns an error
  mm/page_owner: avoid null pointer dereference
  tools/vm/slabinfo: fix spelling mistake: "Ocurrences" -> "Occurrences"
  fs/nilfs2: fix potential underflow in call to crc32_le
  oom, suspend: fix oom_reaper vs. oom_killer_disable race
  ocfs2: disable BUG assertions in reading blocks
  mm, compaction: abort free scanner if split fails
  mm: prevent KASAN false positives in kmemleak
  mm/hugetlb: clear compound_mapcount when freeing gigantic pages
  mm/swap.c: flush lru pvecs on compound page arrival
  memcg: css_alloc should return an ERR_PTR value on error
  memcg: mem_cgroup_migrate() may be called with irq disabled
  hugetlb: fix nr_pmds accounting with shared page tables
  Revert "mm: disable fault around on emulated access bit architecture"
  Revert "mm: make faultaround produce old ptes"
  mailmap: add Boris Brezillon's email
  mailmap: add Antoine Tenart's email
  mm, sl[au]b: add __GFP_ATOMIC to the GFP reclaim mask
  mm: mempool: kasan: don't poot mempool objects in quarantine
  ...
commit 086e3eb65e

 .mailmap | 4 ++++
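Nearly every hunk below applies the same mechanical cleanup from this batch: __GFP_REPEAT is dropped from order-0 (single-page) allocations, where the page allocator never honored it, and it survives only for genuinely costly higher-order requests (the radix__pgd_alloc hunk even re-adds the flag to an order-4 allocation). A minimal userspace sketch of the before/after shape of that change; the flag values here are illustrative assumptions, not the kernel's real gfp.h definitions:

#include <stdio.h>

/* Illustrative bit values only -- not the kernel's actual gfp.h flags. */
#define GFP_KERNEL    0x1u
#define __GFP_REPEAT  0x2u  /* "try harder"; only meaningful above order 0 */
#define __GFP_ZERO    0x4u

/* Before: page-table helpers passed __GFP_REPEAT for a single page. */
static unsigned int pte_gfp_old(void)
{
	return GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
}

/* After: the flag is dropped; it was a no-op for order-0 requests. */
static unsigned int pte_gfp_new(void)
{
	return GFP_KERNEL | __GFP_ZERO;
}

int main(void)
{
	printf("old mask: %#x\n", pte_gfp_old());
	printf("new mask: %#x\n", pte_gfp_new());
	return 0;
}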
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
@@ -2776,9 +2776,9 @@ F:	include/net/caif/
 F:	net/caif/

 CALGARY x86-64 IOMMU
-M:	Muli Ben-Yehuda <muli@il.ibm.com>
-M:	"Jon D. Mason" <jdmason@kudzu.us>
-L:	discuss@x86-64.org
+M:	Muli Ben-Yehuda <mulix@mulix.org>
+M:	Jon Mason <jdmason@kudzu.us>
+L:	iommu@lists.linux-foundation.org
 S:	Maintained
 F:	arch/x86/kernel/pci-calgary_64.c
 F:	arch/x86/kernel/tce_64.c
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return ret;
 }

@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }

@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 __get_order_pte());

 	return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	pgtable_t pte_pg;
 	struct page *page;

-	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
 	if (!pte_pg)
 		return 0;
 	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
@@ -29,7 +29,7 @@

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -26,7 +26,7 @@

 #define check_pgt_cache()	do { } while (0)

-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

 #if CONFIG_PGTABLE_LEVELS > 2
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+	return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
 }

 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+	return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }

 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	void *pg;

-	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 	if (!pg)
 		return NULL;

@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)

 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }

 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));

 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
 	if (pte)
 		clear_page(pte);
 	return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *page;

 #ifdef CONFIG_HIGHPTE
-	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	page = alloc_pages(GFP_KERNEL, 0);
 #endif
 	if (!page)
 		return NULL;
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;

-	pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					   unsigned long address)
 {
-	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+	gfp_t flags = GFP_KERNEL | __GFP_ZERO;
 	return (pte_t *) __get_free_page(flags);
 }

@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+	unsigned long page = __get_free_page(GFP_DMA);

 	if (!page)
 		return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+	struct page *page = alloc_pages(GFP_DMA, 0);
 	pte_t *pte;

 	if (!page)
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
 	pte_t *pte;

-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	if (pte) {
 		__flush_page_to_ram(pte);
 		flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
 	struct page *page;
 	pte_t *pte;

-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
 	if(!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
@@ -37,7 +37,7 @@ do { \
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	unsigned long page = __get_free_page(GFP_KERNEL);

 	if (!page)
 		return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
 {
-	struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	struct page *page = alloc_pages(GFP_KERNEL, 0);

 	if (page == NULL)
 		return NULL;
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
-					      __GFP_ZERO);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 	return pte;
 }

@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
 {
 	struct page *pte;
-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 	struct page *ptepage;

 #ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+	int flags = GFP_KERNEL | __GFP_HIGHMEM;
 #else
-	int flags = GFP_KERNEL | __GFP_REPEAT;
+	int flags = GFP_KERNEL;
 #endif

 	ptepage = alloc_pages(flags, 0);
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;
 	if (mem_init_done) {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL |
-					__GFP_REPEAT | __GFP_ZERO);
+		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 	} else {
 		pte = (pte_t *)early_get_page();
 		if (pte)
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);

 	return pte;
 }
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
 	if (!pte)
 		return NULL;
 	clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;

-	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
 	if (pmd)
 		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
 	return pmd;
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)

 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
 	if (pte)
 		clear_page(pte);
 	return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *pte;

 #ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL, 0);
 #endif
 	if (!pte)
 		return NULL;
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-					 PTE_ORDER);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);

 	return pte;
 }
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
 	if (pte) {
 		if (!pgtable_page_ctor(pte)) {
 			__free_page(pte);
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
 {
 	struct page *pte;
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL, 0);
 	if (!pte)
 		return NULL;
 	clear_page(page_address(pte));
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
 	pte_t *pte;

 	if (likely(mem_init_done)) {
-		pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+		pte = (pte_t *) __get_free_page(GFP_KERNEL);
 	} else {
 		pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 #if 0
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-					       PMD_ORDER);
+	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
 	if (pmd)
 		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
 	return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }

@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
			pgtable_cache[(shift) - 1];	\
		})

-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO

 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 	return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP, 4);
+	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }

 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -115,8 +114,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -151,7 +149,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }

 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }

 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }

 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	cachep = PGT_CACHE(pdshift - pshift);
 #endif

-	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+	new = kmem_cache_zalloc(cachep, GFP_KERNEL);

 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
 	pte_t *pte;

 	if (slab_is_available()) {
-		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+		pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	} else {
 		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 		if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *ptepage;

-	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+	gfp_t flags = GFP_KERNEL | __GFP_ZERO;

 	ptepage = alloc_pages(flags, 0);
 	if (!ptepage)
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 {
 	void *ret = NULL;
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!kernel && !pgtable_page_ctor(page)) {
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		return table;
 	}
 	/* Allocate a fresh page */
-	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
 	pte_t *pte;

-	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-					 PTE_ORDER);
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);

 	return pte;
 }
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
 	struct page *pte;

-	pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+	pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
 	if (!pte)
 		return NULL;
 	clear_highpage(pte);
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
 {
-	return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+	return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }

 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 	struct page *page;
 	void *pg;

-	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+	pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 	if (!pg)
 		return NULL;
 	page = virt_to_page(pg);
@@ -1,7 +1,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>

-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO

 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache,
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }

 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(pgtable_cache,
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
			    unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pte_t *pte = NULL;

 	if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
			unsigned long address)
 {
-	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-				       __GFP_REPEAT | __GFP_ZERO);
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (!pgtable_page_ctor(page)) {
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
			       int order)
 {
-	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+	gfp_t flags = GFP_KERNEL|__GFP_ZERO;
 	struct page *p;
 	int i;

@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;

-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
 	return pte;
 }

@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;

-	pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+	pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 	if (!pte)
 		return NULL;
 	if (!pgtable_page_ctor(pte)) {
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)		get_pgd_slow(mm)
 #define pgd_free(mm, pgd)	free_pgd_slow(mm, pgd)

-#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

 /*
  * Allocate one PTE table.
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)

 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }

 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif

-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>

-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO

 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
 	if (efi_enabled(EFI_OLD_MEMMAP))
 		return 0;

-	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
 	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
 	if (!efi_pgd)
 		return -ENOMEM;
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
 	if (unlikely(!slab_is_available()))
 		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);

-	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+	return (void *)__get_free_page(GFP_KERNEL);
 }

 static void __ref free_p2m_page(void *p)
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 	pte_t *ptep;
 	int i;

-	ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	ptep = (pte_t *)__get_free_page(GFP_KERNEL);
 	if (!ptep)
 		return NULL;
 	for (i = 0; i < 1024; i++)
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
 	int ret;

 	/* get_zeroed_page returns page with ref count 1 */
-	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+	p = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 	empty_page = virt_to_page(p);
@@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi,
 	set_fs(KERNEL_DS);

 	mutex_lock(&sbi->pipe_mutex);
-	wr = __vfs_write(file, data, bytes, &file->f_pos);
-	while (bytes && wr) {
+	while (bytes) {
+		wr = __vfs_write(file, data, bytes, &file->f_pos);
+		if (wr <= 0)
+			break;
 		data += wr;
 		bytes -= wr;
-		wr = __vfs_write(file, data, bytes, &file->f_pos);
 	}
 	mutex_unlock(&sbi->pipe_mutex);

@@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)

 	BUG_ON(size & (size-1)); /* Must be a power of 2 */

-	flags |= __GFP_REPEAT;
-	if (size == PAGE_SIZE)
-		ptr = (void *)__get_free_pages(flags, 0);
-	else if (size > PAGE_SIZE) {
-		int order = get_order(size);
-
-		if (order < 3)
-			ptr = (void *)__get_free_pages(flags, order);
-		else
-			ptr = vmalloc(size);
-	} else
+	if (size < PAGE_SIZE)
 		ptr = kmem_cache_alloc(get_slab(size), flags);
+	else
+		ptr = (void *)__get_free_pages(flags, get_order(size));

 	/* Check alignment; SLUB has gotten this wrong in the past,
 	 * and this can lead to user data corruption! */
@@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)

 void jbd2_free(void *ptr, size_t size)
 {
-	if (size == PAGE_SIZE) {
-		free_pages((unsigned long)ptr, 0);
-		return;
-	}
-	if (size > PAGE_SIZE) {
-		int order = get_order(size);
-
-		if (order < 3)
-			free_pages((unsigned long)ptr, order);
-		else
-			vfree(ptr);
-		return;
-	}
-	kmem_cache_free(get_slab(size), ptr);
+	if (size < PAGE_SIZE)
+		kmem_cache_free(get_slab(size), ptr);
+	else
+		free_pages((unsigned long)ptr, get_order(size));
 };

 /*
@@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
 	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
 		return 0;
 	bytes = le16_to_cpu(sbp->s_bytes);
-	if (bytes > BLOCK_SIZE)
+	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
 		return 0;
 	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
 		       sumoff);
@@ -1,7 +1,5 @@
 ccflags-y := -Ifs/ocfs2

-ccflags-y += -DCATCH_BH_JBD_RACES
-
 obj-$(CONFIG_OCFS2_FS) +=	\
	ocfs2.o			\
	ocfs2_stackglue.o
@@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,

 		lock_buffer(bh);
 		if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
 			mlog(ML_ERROR,
 			     "block %llu had the JBD bit set "
 			     "while I was in lock_buffer!",
 			     (unsigned long long)bh->b_blocknr);
 			BUG();
+#else
+			unlock_buffer(bh);
+			continue;
+#endif
 		}

 		clear_buffer_uptodate(bh);
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);

 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
		  gfp_t flags);
 void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);

 void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 bool kasan_slab_free(struct kmem_cache *s, void *object);
-void kasan_poison_slab_free(struct kmem_cache *s, void *object);

 struct kasan_cache {
	int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);

+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+
 #else /* CONFIG_KASAN */

 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,

 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags) {}
 static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
	return false;
 }
-static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}

 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}

+static inline void kasan_unpoison_slab(const void *ptr) { }
+
 #endif /* CONFIG_KASAN */

 #endif /* LINUX_KASAN_H */
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }

 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old);
+		struct page *page, pte_t *pte, bool write, bool anon);
 #endif

 /*
@@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
 {
	struct blacklist_entry *entry;
	char fn_name[KSYM_SYMBOL_LEN];
+	unsigned long addr;

	if (list_empty(&blacklisted_initcalls))
		return false;

-	sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+	addr = (unsigned long) dereference_function_descriptor(fn);
+	sprint_symbol_no_offset(fn_name, addr);

	list_for_each_entry(entry, &blacklisted_initcalls, next) {
		if (!strcmp(fn_name, entry->buf)) {
@@ -146,6 +146,18 @@ int freeze_processes(void)
	if (!error && !oom_killer_disable())
		error = -EBUSY;

+	/*
+	 * There is a hard to fix race between oom_reaper kernel thread
+	 * and oom_killer_disable. oom_reaper calls exit_oom_victim
+	 * before the victim reaches exit_mm so try to freeze all the tasks
+	 * again and catch such a left over task.
+	 */
+	if (!error) {
+		pr_info("Double checking all user space processes after OOM killer disable... ");
+		error = try_to_freeze_tasks(true);
+		pr_cont("\n");
+	}
+
	if (error)
		thaw_processes();
	return error;
@@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
+		if (!isolated)
+			break;
+
		total_isolated += isolated;
+		cc->nr_freepages += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}
-
-		/* If a page was split, advance to the end of it */
-		if (isolated) {
-			cc->nr_freepages += isolated;
-			if (!strict &&
-				cc->nr_migratepages <= cc->nr_freepages) {
-				blockpfn += isolated;
-				break;
-			}
-
-			blockpfn += isolated - 1;
-			cursor += isolated - 1;
-			continue;
+		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+			blockpfn += isolated;
+			break;
		}
+		/* Advance to the end of split page */
+		blockpfn += isolated - 1;
+		cursor += isolated - 1;
+		continue;

isolate_fail:
		if (strict)
@@ -469,6 +467,9 @@ isolate_fail:

	}

+	if (locked)
+		spin_unlock_irqrestore(&cc->zone->lock, flags);
+
	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
@@ -490,9 +491,6 @@ isolate_fail:
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

-	if (locked)
-		spin_unlock_irqrestore(&cc->zone->lock, flags);
-
	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -1011,6 +1009,7 @@ static void isolate_freepages(struct compact_control *cc)
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
+		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
@@ -1035,8 +1034,12 @@ static void isolate_freepages(struct compact_control *cc)
			continue;

		/* Found a block suitable for isolating free pages from. */
-		isolate_freepages_block(cc, &isolate_start_pfn,
+		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);
+		/* If isolation failed early, do not continue needlessly */
+		if (!isolated && isolate_start_pfn < block_end_pfn &&
+		    cc->nr_migratepages > cc->nr_freepages)
+			break;

		/*
		 * If we isolated enough freepages, or aborted due to async
@@ -2186,7 +2186,7 @@ repeat:
		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;
		addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-		do_set_pte(vma, addr, page, pte, false, false, true);
+		do_set_pte(vma, addr, page, pte, false, false);
		unlock_page(page);
		goto next;
unlock:
@@ -1030,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page,
	int nr_pages = 1 << order;
	struct page *p = page + 1;

+	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
@@ -4228,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
-				mm_inc_nr_pmds(mm);
				get_page(virt_to_page(spte));
				break;
			}
@@ -4243,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
+		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
-		mm_inc_nr_pmds(mm);
	}
	spin_unlock(ptl);
out:
@@ -24,7 +24,8 @@
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
	kasan_kmalloc(cache, object, cache->object_size, flags);
 }

-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 {
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
	kasan_kmalloc(page->slab_cache, object, size, flags);
 }

-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
 {
	struct page *page;

@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
-		kasan_slab_free(page->slab_cache, ptr);
+		kasan_poison_slab_free(page->slab_cache, ptr);
 }

 void kasan_kfree_large(const void *ptr)
mm/kmemleak.c
@@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq,
 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
 	seq_printf(seq, " hex dump (first %zu bytes):\n", len);
+	kasan_disable_current();
 	seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
 			HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+	kasan_enable_current();
 }
 
 /*
mm/memcontrol.c
@@ -4203,7 +4203,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	return &memcg->css;
 fail:
 	mem_cgroup_free(memcg);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 static int
@@ -5544,6 +5544,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	struct mem_cgroup *memcg;
 	unsigned int nr_pages;
 	bool compound;
+	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5574,10 +5575,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
 	commit_charge(newpage, memcg, false);
 
-	local_irq_disable();
+	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
31
mm/memory.c
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old)
+		struct page *page, pte_t *pte, bool write, bool anon)
 {
 	pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (old)
-		entry = pte_mkold(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-	PAGE_SIZE;
-#else
 	rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
 			goto unlock_out;
-		do_fault_around(vma, address, pte, pgoff, flags);
-		/* Check if the fault is handled by faultaround */
-		if (!pte_same(*pte, orig_pte)) {
-			/*
-			 * Faultaround produce old pte, but the pte we've
-			 * handler fault for should be young.
-			 */
-			pte_t entry = pte_mkyoung(*pte);
-			if (ptep_set_access_flags(vma, address, pte, entry, 0))
-				update_mmu_cache(vma, address, pte);
-			goto unlock_out;
-		}
 		pte_unmap_unlock(pte, ptl);
 	}
 
@@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, false, false, false);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	unlock_page(fault_page);
 unlock_out:
 	pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		goto uncharge_out;
 	}
-	do_set_pte(vma, address, new_page, pte, true, true, false);
+	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, true, false, false);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))
12
mm/mempool.c
@@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element)
 
 static void kasan_poison_element(mempool_t *pool, void *element)
 {
-	if (pool->alloc == mempool_alloc_slab)
-		kasan_poison_slab_free(pool->pool_data, element);
-	if (pool->alloc == mempool_kmalloc)
-		kasan_kfree(element);
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		kasan_poison_kfree(element);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
-	if (pool->alloc == mempool_alloc_slab)
-		kasan_slab_alloc(pool->pool_data, element, flags);
-	if (pool->alloc == mempool_kmalloc)
-		kasan_krealloc(element, (size_t)pool->pool_data, flags);
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		kasan_unpoison_slab(element);
 	if (pool->alloc == mempool_alloc_pages)
 		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
mm/oom_kill.c
@@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk)
 	p = find_lock_task_mm(tsk);
 	if (!p)
 		goto unlock_oom;
 
 	mm = p->mm;
-	if (!atomic_inc_not_zero(&mm->mm_users)) {
-		task_unlock(p);
-		goto unlock_oom;
-	}
-
+	atomic_inc(&mm->mm_users);
 	task_unlock(p);
 
 	if (!down_read_trylock(&mm->mmap_sem)) {
mm/page_owner.c
@@ -207,13 +207,15 @@ void __dump_page_owner(struct page *page)
 		.nr_entries = page_ext->nr_entries,
 		.entries = &page_ext->trace_entries[0],
 	};
-	gfp_t gfp_mask = page_ext->gfp_mask;
-	int mt = gfpflags_to_migratetype(gfp_mask);
+	gfp_t gfp_mask;
+	int mt;
 
 	if (unlikely(!page_ext)) {
 		pr_alert("There is not page extension available.\n");
 		return;
 	}
+	gfp_mask = page_ext->gfp_mask;
+	mt = gfpflags_to_migratetype(gfp_mask);
 
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
mm/shmem.c
@@ -2227,7 +2227,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
 			/* Remove the !PageUptodate pages we added */
 			shmem_undo_range(inode,
 				(loff_t)start << PAGE_SHIFT,
-				(loff_t)index << PAGE_SHIFT, true);
+				((loff_t)index << PAGE_SHIFT) - 1, true);
 			goto undone;
 		}
 
11
mm/swap.c
@@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page)
 		get_page(page);
 		local_irq_save(flags);
 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_move_tail(pvec);
 		local_irq_restore(flags);
 	}
@@ -296,7 +296,7 @@ void activate_page(struct page *page)
 		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
 		get_page(page);
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
 		put_cpu_var(activate_page_pvecs);
 	}
@@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page)
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
 	get_page(page);
-	if (!pagevec_space(pvec))
+	if (!pagevec_add(pvec, page) || PageCompound(page))
 		__pagevec_lru_add(pvec);
-	pagevec_add(pvec, page);
 	put_cpu_var(lru_add_pvec);
 }
 
@@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page)
 	if (likely(get_page_unless_zero(page))) {
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
 		put_cpu_var(lru_deactivate_file_pvecs);
 	}
@@ -648,7 +647,7 @@ void deactivate_page(struct page *page)
 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
 		get_page(page);
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, page) || PageCompound(page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 		put_cpu_var(lru_deactivate_pvecs);
 	}
tools/testing/selftests/vm/compaction_test.c
@@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
 	printf("No of huge pages allocated = %d\n",
 	       (atoi(nr_hugepages)));
 
-	if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages))
+	if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
 	    != strlen(initial_nr_hugepages)) {
 		perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
 		goto close_fd;
tools/vm/slabinfo.c
@@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s)
 		s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
 
 	if (total) {
-		printf("\nSlab Deactivation Ocurrences %%\n");
+		printf("\nSlab Deactivation Occurrences %%\n");
 		printf("-------------------------------------------------\n");
 		printf("Slab full %7lu %3lu%%\n",
 			s->deactivate_full, (s->deactivate_full * 100) / total);