ARM: 8249/1: mm: dump: don't skip regions
Currently the arm page table dumping code starts dumping page tables from USER_PGTABLES_CEILING. This is unnecessary for skipping any entries related to userspace as the swapper_pg_dir does not contain such entries, and results in a couple of unfortunate side effects.

Firstly, any kernel mappings which might exist below USER_PGTABLES_CEILING will not be accounted in the dump output. This masks any entries erroneously created below this address.

Secondly, if the final page table entry walked is part of a valid mapping, the page table dumping code will not log the region this entry is part of, as the final note_page call in walk_pgd will trigger an early return when 0 < USER_PGTABLES_CEILING. Luckily this isn't seen on contemporary systems as they typically don't have enough RAM to extend the linear mapping right to the end of the address space.

Due to the way addr is constructed in the walk_* functions, it can never be less than USER_PGTABLES_CEILING when walking the page tables, so it is not necessary to avoid dereferencing invalid table addresses. The existing checks for st->current_prot and st->marker[1].start_address are sufficient to ensure we will not print and/or dereference garbage when trying to log information.

This patch removes both problematic uses of USER_PGTABLES_CEILING from the arm page table dumping code, preventing both of these issues. We will now report any low mappings, and the final note_page call will not return early, ensuring all regions are logged.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
parent 841ee23025
commit cca547e9aa
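The two hunks below show only the lines that change. For orientation, here is a minimal sketch of what walk_pgd() looks like after the patch; the else branch and the trailing note_page() flush are reconstructed from the surrounding arch/arm/mm/dump.c code rather than taken from the hunks themselves, so treat them as an approximation.

/*
 * Sketch of the post-patch walk_pgd() (approximate, not a verbatim copy
 * of arch/arm/mm/dump.c): the walk now starts at PGD index 0, and addr
 * is derived purely from the index, so any mappings below
 * USER_PGTABLES_CEILING are visited and reported like any others.
 */
static void walk_pgd(struct seq_file *m)
{
	pgd_t *pgd = swapper_pg_dir;
	struct pg_state st;
	unsigned long addr;
	unsigned i;

	memset(&st, 0, sizeof(st));
	st.seq = m;
	st.marker = address_markers;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = i * PGDIR_SIZE;	/* address covered by this PGD slot */
		if (!pgd_none(*pgd))
			walk_pud(&st, pgd, addr);
		else
			note_page(&st, addr, 1, pgd_val(*pgd));
	}

	/*
	 * Final flush: with the ceiling check removed from note_page(),
	 * this call can no longer return early, so the last region is
	 * always logged.
	 */
	note_page(&st, 0, 0, 0);
}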
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
 	static const char units[] = "KMGTPE";
 	u64 prot = val & pg_level[level].mask;
 
-	if (addr < USER_PGTABLES_CEILING)
-		return;
-
 	if (!st->level) {
 		st->level = level;
 		st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
 	pgd_t *pgd = swapper_pg_dir;
 	struct pg_state st;
 	unsigned long addr;
-	unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+	unsigned i;
 
 	memset(&st, 0, sizeof(st));
 	st.seq = m;
 	st.marker = address_markers;
 
-	pgd += pgdoff;
-
-	for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
 		addr = i * PGDIR_SIZE;
 		if (!pgd_none(*pgd)) {
 			walk_pud(&st, pgd, addr);
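The commit message leans on the existing checks in note_page() to argue that the removed ceiling guard was redundant. A schematic of that guard logic, simplified and paraphrased from arch/arm/mm/dump.c (the printing of sizes and attribute strings is elided, and the exact branch bodies are an approximation, not upstream code), looks roughly like this:

static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned level, u64 val)
{
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		/* First call: latch level/prot for the walk. */
		st->level = level;
		st->current_prot = prot;
		/* ... print the first "---[ marker ]---" heading ... */
	} else if (prot != st->current_prot || level != st->level ||
		   addr >= st->marker[1].start_address) {
		/*
		 * Region boundary. A range line is only printed when a real
		 * mapping was being tracked (st->current_prot != 0), and the
		 * marker array is only advanced when addr crosses
		 * st->marker[1].start_address, so starting the walk at
		 * address 0 cannot print or dereference garbage.
		 */
		if (st->current_prot) {
			/* ... print st->start_address..addr, size, attributes ... */
		}
		if (addr >= st->marker[1].start_address) {
			st->marker++;
			/* ... print the next "---[ marker ]---" heading ... */
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->level = level;
	}
}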