mm: kmsan: handle alloc failures in kmsan_ioremap_page_range()
Similarly to kmsan_vmap_pages_range_noflush(), kmsan_ioremap_page_range()
must also properly handle allocation/mapping failures. If one occurs, it
must clean up the metadata mappings it has already created and return an
error code, so that the error can be propagated to ioremap_page_range().
Without this, KMSAN may silently fail to bring the metadata for the page
range into a consistent state, which will result in user-visible crashes
when trying to access those pages.
Link: https://lkml.kernel.org/r/20230413131223.4135168-2-glider@google.com
Fixes: b073d7f8ae ("mm: kmsan: maintain KMSAN metadata for page operations")
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
Reviewed-by: Marco Elver <elver@google.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 47ebd0310e
commit fdea03e12a
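The core of the fix is a standard unwind idiom: remember how many loop
iterations completed, and on failure roll back only that prefix before
returning the error. As a rough illustration, here is a minimal,
self-contained userspace C sketch of the same pattern; map_one(),
unmap_one(), map_range() and fail_at are hypothetical stand-ins for the
per-page alloc/map steps in the patch below, not kernel APIs.

	#include <errno.h>
	#include <stdio.h>

	static int fail_at = 5;	/* index at which we inject a failure */

	/* Hypothetical per-item operation; fails at index fail_at. */
	static int map_one(int i)
	{
		return (i == fail_at) ? -ENOMEM : 0;
	}

	static void unmap_one(int i)
	{
		printf("unwinding item %d\n", i);
	}

	/*
	 * Map items [0, nr). "clean" is bumped in the loop increment, so
	 * it always holds the number of fully completed iterations; the
	 * error path unwinds exactly that prefix, then the error is
	 * returned so the caller can propagate it.
	 */
	static int map_range(int nr)
	{
		int err = 0, clean = 0;

		for (int i = 0; i < nr; i++, clean = i) {
			err = map_one(i);
			if (err)
				goto ret;
		}
		clean = 0;	/* loop finished normally, nothing to clean up */
	ret:
		while (clean > 0)
			unmap_one(--clean);
		return err;
	}

	int main(void)
	{
		printf("map_range() = %d\n", map_range(8));
		return 0;
	}

With fail_at = 5, items 0..4 are mapped, item 5 fails, and the error path
unwinds those five items before returning -ENOMEM, mirroring how the patch
tears down the already-mapped shadow/origin prefix with
__vunmap_range_noflush().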
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -160,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
  * @page_shift: page_shift argument passed to vmap_range_noflush().
  *
  * KMSAN creates new metadata pages for the physical pages mapped into the
- * virtual memory.
+ * virtual memory. Returns 0 on success, callers must check for non-zero return
+ * value.
  */
-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
-			      phys_addr_t phys_addr, pgprot_t prot,
-			      unsigned int page_shift);
+int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+			     phys_addr_t phys_addr, pgprot_t prot,
+			     unsigned int page_shift);
 
 /**
  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
@@ -296,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
 {
 }
 
-static inline void kmsan_ioremap_page_range(unsigned long start,
-					    unsigned long end,
-					    phys_addr_t phys_addr,
-					    pgprot_t prot,
-					    unsigned int page_shift)
+static inline int kmsan_ioremap_page_range(unsigned long start,
+					   unsigned long end,
+					   phys_addr_t phys_addr, pgprot_t prot,
+					   unsigned int page_shift)
 {
+	return 0;
 }
 
 static inline void kmsan_iounmap_page_range(unsigned long start,
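Note that the second hunk updates the static inline stub variant, used when
CONFIG_KMSAN is off: it now unconditionally returns 0, so callers can check
the return value without any #ifdef and a non-instrumented kernel sees a
successful no-op.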
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
  * into the virtual memory. If those physical pages already had shadow/origin,
  * those are ignored.
  */
-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
-			      phys_addr_t phys_addr, pgprot_t prot,
-			      unsigned int page_shift)
+int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+			     phys_addr_t phys_addr, pgprot_t prot,
+			     unsigned int page_shift)
 {
 	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
 	struct page *shadow, *origin;
 	unsigned long off = 0;
-	int nr;
+	int nr, err = 0, clean = 0, mapped;
 
 	if (!kmsan_enabled || kmsan_in_runtime())
-		return;
+		return 0;
 
 	nr = (end - start) / PAGE_SIZE;
 	kmsan_enter_runtime();
-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
 		shadow = alloc_pages(gfp_mask, 1);
 		origin = alloc_pages(gfp_mask, 1);
-		__vmap_pages_range_noflush(
+		if (!shadow || !origin) {
+			err = -ENOMEM;
+			goto ret;
+		}
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_shadow(start + off),
 			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
 			PAGE_SHIFT);
-		__vmap_pages_range_noflush(
+		if (mapped) {
+			err = mapped;
+			goto ret;
+		}
+		shadow = NULL;
+		mapped = __vmap_pages_range_noflush(
 			vmalloc_origin(start + off),
 			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
 			PAGE_SHIFT);
+		if (mapped) {
+			__vunmap_range_noflush(
+				vmalloc_shadow(start + off),
+				vmalloc_shadow(start + off + PAGE_SIZE));
+			err = mapped;
+			goto ret;
+		}
+		origin = NULL;
 	}
+	/* Page mapping loop finished normally, nothing to clean up. */
+	clean = 0;
+
+ret:
+	if (clean > 0) {
+		/*
+		 * Something went wrong. Clean up shadow/origin pages allocated
+		 * on the last loop iteration, then delete mappings created
+		 * during the previous iterations.
+		 */
+		if (shadow)
+			__free_pages(shadow, 1);
+		if (origin)
+			__free_pages(origin, 1);
+		__vunmap_range_noflush(
+			vmalloc_shadow(start),
+			vmalloc_shadow(start + clean * PAGE_SIZE));
+		__vunmap_range_noflush(
+			vmalloc_origin(start),
+			vmalloc_origin(start + clean * PAGE_SIZE));
+	}
 	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
 	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
 	kmsan_leave_runtime();
+	return err;
 }
 
 void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
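Two details of this hunk are worth spelling out. "clean = i" sits in the
loop increment, so it only advances after an iteration completes fully; the
cleanup path therefore unmaps exactly the prefix that was actually mapped.
And shadow/origin are reset to NULL once their pages are successfully
mapped, so __free_pages() in the error path only touches pages that were
allocated on the failing iteration but never handed over to a metadata
mapping.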
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -313,8 +313,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
 				 ioremap_max_page_shift);
 	flush_cache_vmap(addr, end);
 	if (!err)
-		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
-					 ioremap_max_page_shift);
+		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+					       ioremap_max_page_shift);
 	return err;
 }
 
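With this last hunk, ioremap_page_range() treats a KMSAN metadata failure
like any other mapping failure: err is propagated to the caller, which must
already handle a non-zero return, instead of the shadow/origin range being
left half-populated and crashing on a later access.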