// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 and arm implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>

enum address_markers_idx {
	PAGE_OFFSET_NR = 0,
	PAGE_END_NR,
#ifdef CONFIG_KASAN
	KASAN_START_NR,
#endif
};

static struct addr_marker address_markers[] = {
	{ PAGE_OFFSET, "Linear Mapping start" },
	{ 0 /* PAGE_END */, "Linear Mapping end" },
#ifdef CONFIG_KASAN
	{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
	{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
	{ MODULES_VADDR, "Modules start" },
	{ MODULES_END, "Modules end" },
	{ VMALLOC_START, "vmalloc() area" },
	{ VMALLOC_END, "vmalloc() end" },
	{ FIXADDR_START, "Fixmap start" },
	{ FIXADDR_TOP, "Fixmap end" },
	{ PCI_IO_START, "PCI I/O start" },
	{ PCI_IO_END, "PCI I/O end" },
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	{ VMEMMAP_START, "vmemmap start" },
	{ VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
#endif
	{ -1, NULL },
};
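
/*
 * Output goes through these wrappers so that the same walk can run either
 * with a seq_file (the debugfs dump) or with a NULL seq, as
 * ptdump_check_wx() does, in which case nothing is printed.
 */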
#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)	\
({					\
	if (m)				\
		seq_printf(m, fmt);	\
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
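/*
 * Each line of output covers one such range, e.g. (illustrative, from a
 * 4K-page kernel):
 *
 *   0xffffffc800000000-0xffffffc980000000 6G PUD RW NX SHD AF BLK UXN MEM/NORMAL
 */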
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
	unsigned long uxn_pages;
};

struct prot_bits {
	u64 mask;
	u64 val;
	const char *set;
	const char *clear;
};

static const struct prot_bits pte_bits[] = {
	{
		.mask	= PTE_VALID,
		.val	= PTE_VALID,
		.set	= " ",
		.clear	= "F",
	}, {
		.mask	= PTE_USER,
		.val	= PTE_USER,
		.set	= "USR",
		.clear	= "   ",
	}, {
		.mask	= PTE_RDONLY,
		.val	= PTE_RDONLY,
		.set	= "ro",
		.clear	= "RW",
	}, {
		.mask	= PTE_PXN,
		.val	= PTE_PXN,
		.set	= "NX",
		.clear	= "x ",
	}, {
		.mask	= PTE_SHARED,
		.val	= PTE_SHARED,
		.set	= "SHD",
		.clear	= "   ",
	}, {
		.mask	= PTE_AF,
		.val	= PTE_AF,
		.set	= "AF",
		.clear	= "  ",
	}, {
		.mask	= PTE_NG,
		.val	= PTE_NG,
		.set	= "NG",
		.clear	= "  ",
	}, {
		.mask	= PTE_CONT,
		.val	= PTE_CONT,
		.set	= "CON",
		.clear	= "   ",
	}, {
		.mask	= PTE_TABLE_BIT,
		.val	= PTE_TABLE_BIT,
		.set	= "   ",
		.clear	= "BLK",
	}, {
		.mask	= PTE_UXN,
		.val	= PTE_UXN,
		.set	= "UXN",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
		.set	= "DEVICE/nGnRnE",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRE),
		.set	= "DEVICE/nGnRE",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_DEVICE_GRE),
		.set	= "DEVICE/GRE",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_NORMAL_NC),
		.set	= "MEM/NORMAL-NC",
	}, {
		.mask	= PTE_ATTRINDX_MASK,
		.val	= PTE_ATTRINDX(MT_NORMAL),
		.set	= "MEM/NORMAL",
	}
};

struct pg_level {
	const struct prot_bits *bits;
	const char *name;
	size_t num;
	u64 mask;
};
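
/*
 * One entry per level of table. When fewer translation levels are in use,
 * the asm-generic pud/pmd folding makes each pgd entry look like a
 * one-element table at the pud/pmd level, so the "PUD"/"PMD" mnemonics
 * below fall back to "PGD" to keep the output unambiguous.
 */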
static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
		.name	= "PGD",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	}, { /* pud */
		.name	= (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	}, { /* pmd */
		.name	= (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	}, { /* pte */
		.name	= "PTE",
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
};

static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
		      size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}

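/*
 * W^X/UXN sanity checks used by ptdump_check_wx(): kernel mappings are
 * expected to be UXN and never writable and executable at once. Each
 * helper warns once and counts the offending pages.
 */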
static void note_prot_uxn(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;

	if ((st->current_prot & PTE_UXN) == PTE_UXN)
		return;

	WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
		return;
	if ((st->current_prot & PTE_PXN) == PTE_PXN)
		return;

	WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
		      u64 val)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		st->level = level;
		st->current_prot = prot;
		st->start_address = addr;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			note_prot_uxn(st, addr);
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
					   st->start_address, addr);

			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
					   pg_level[st->level].name);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits,
					  pg_level[st->level].num);
			pt_dump_seq_puts(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
		}

		st->start_address = addr;
		st->current_prot = prot;
		st->level = level;
	}

	if (addr >= st->marker[1].start_address) {
		st->marker++;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	}
}

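/*
 * Table walkers, one per level. Entries are loaded with READ_ONCE() so
 * that each entry is read exactly once; the live kernel tables may change
 * under the walker.
 */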
static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start,
		     unsigned long end)
{
	unsigned long addr = start;
	pte_t *ptep = pte_offset_kernel(pmdp, start);

	do {
		note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
	} while (ptep++, addr += PAGE_SIZE, addr != end);
}

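/*
 * At the pmd and pud levels, empty and section (block) entries are leaves
 * and are logged directly; anything else must be a next-level table, which
 * the BUG_ON(*_bad()) checks assert before descending.
 */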
static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start,
		     unsigned long end)
{
	unsigned long next, addr = start;
	pmd_t *pmdp = pmd_offset(pudp, start);

	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		if (pmd_none(pmd) || pmd_sect(pmd)) {
			note_page(st, addr, 3, pmd_val(pmd));
		} else {
			BUG_ON(pmd_bad(pmd));
			walk_pte(st, pmdp, addr, next);
		}
	} while (pmdp++, addr = next, addr != end);
}

static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start,
		     unsigned long end)
{
	unsigned long next, addr = start;
	pud_t *pudp = pud_offset(pgdp, start);

	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		if (pud_none(pud) || pud_sect(pud)) {
			note_page(st, addr, 2, pud_val(pud));
		} else {
			BUG_ON(pud_bad(pud));
			walk_pmd(st, pudp, addr, next);
		}
	} while (pudp++, addr = next, addr != end);
}

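/*
 * end is TASK_SIZE_64 when walking a user address space; for kernel
 * addresses the walk runs to the top of the address space, where the wrap
 * of addr to zero terminates the loop.
 */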
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
		     unsigned long start)
{
	unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0;
	unsigned long next, addr = start;
	pgd_t *pgdp = pgd_offset(mm, start);

	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);

		if (pgd_none(pgd)) {
			note_page(st, addr, 1, pgd_val(pgd));
		} else {
			BUG_ON(pgd_bad(pgd));
			walk_pud(st, pgdp, addr, next);
		}
	} while (pgdp++, addr = next, addr != end);
}

void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
	struct pg_state st = {
		.seq = m,
		.marker = info->markers,
	};

	walk_pgd(&st, info->mm, info->base_addr);

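	/* Flush out the final, still-open range. */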
	note_page(&st, 0, 0, 0);
}

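/*
 * Precompute, per level, the mask of all attribute bits that dump_prot()
 * decodes, so note_page() can compare entries on those bits alone.
 */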
static void ptdump_initialize(void)
{
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].bits[j].mask;
}

static struct ptdump_info kernel_ptdump_info = {
	.mm		= &init_mm,
	.markers	= address_markers,
	.base_addr	= PAGE_OFFSET,
};

void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.check_wx = true,
	};

	walk_pgd(&st, &init_mm, PAGE_OFFSET);
	note_page(&st, 0, 0, 0);
	if (st.wx_pages || st.uxn_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
			st.wx_pages, st.uxn_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

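/*
 * PAGE_END and KASAN_SHADOW_START depend on the runtime VA configuration,
 * so those markers are filled in here rather than statically initialized
 * (hence the 0 placeholders in address_markers[]).
 */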
static int ptdump_init(void)
{
	address_markers[PAGE_END_NR].start_address = PAGE_END;
#ifdef CONFIG_KASAN
	address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
#endif
	ptdump_initialize();
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
	return 0;
}
device_initcall(ptdump_init);