mm/debug_vm_pgtable,page_table_check: warn pte map fails

Failures here would be surprising: pte_advanced_tests(),
pte_clear_tests(), and __page_table_check_pte_clear_range() each issue a
warning if pte_offset_map() or pte_offset_map_lock() fails.

Link: https://lkml.kernel.org/r/3ea9e4f-e5cf-d7d9-4c2-291b3c5a3636@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Hugh Dickins 2023-06-08 18:27:52 -07:00 committed by Andrew Morton
parent 3622d3cde3
commit 9f2bad096d
2 changed files with 10 additions and 1 deletions

View File

@@ -138,6 +138,9 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args)
 		return;
 
 	pr_debug("Validating PTE advanced\n");
+	if (WARN_ON(!args->ptep))
+		return;
+
 	pte = pfn_pte(args->pte_pfn, args->page_prot);
 	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
 	flush_dcache_page(page);
@@ -619,6 +622,9 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args)
 	 * the unexpected overhead of cache flushing is acceptable.
 	 */
 	pr_debug("Validating PTE clear\n");
+	if (WARN_ON(!args->ptep))
+		return;
+
 #ifndef CONFIG_RISCV
 	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
 #endif
@@ -1377,7 +1383,8 @@ static int __init debug_vm_pgtable(void)
 	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
 	pte_clear_tests(&args);
 	pte_advanced_tests(&args);
-	pte_unmap_unlock(args.ptep, ptl);
+	if (args.ptep)
+		pte_unmap_unlock(args.ptep, ptl);
 
 	ptl = pmd_lock(args.mm, args.pmdp);
 	pmd_clear_tests(&args);

View File

@@ -240,6 +240,8 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm,
 		pte_t *ptep = pte_offset_map(&pmd, addr);
 		unsigned long i;
 
+		if (WARN_ON(!ptep))
+			return;
 		for (i = 0; i < PTRS_PER_PTE; i++) {
 			__page_table_check_pte_clear(mm, addr, *ptep);
 			addr += PAGE_SIZE;