linux-next/include/linux/io-mapping.h
commit e0b3e0b1a0 ("io-mapping: indicate mapping failure")
Author: Michael J. Ruhl
The !ATOMIC_IOMAP version of io_mapping_init_wc will always return
success, even when the ioremap fails.

Since the ATOMIC_IOMAP version returns NULL when the init fails, and
callers check for a NULL return on error, this is unexpected.

During a device probe, where the ioremap failed, a crash can look like
this:

    BUG: unable to handle page fault for address: 0000000000210000
     #PF: supervisor write access in kernel mode
     #PF: error_code(0x0002) - not-present page
     Oops: 0002 [#1] PREEMPT SMP
     CPU: 0 PID: 177 Comm:
     RIP: 0010:fill_page_dma [i915]
       gen8_ppgtt_create [i915]
       i915_ppgtt_create [i915]
       intel_gt_init [i915]
       i915_gem_init [i915]
       i915_driver_probe [i915]
       pci_device_probe
       really_probe
       driver_probe_device

The remap failure occurred much earlier in the probe.  If it had been
propagated, the driver would have exited with an error.

Return NULL on ioremap failure.

[akpm@linux-foundation.org: detect ioremap_wc() errors earlier]

Fixes: cafaf14a5d ("io-mapping: Always create a struct to hold metadata about the io-mapping")
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: <stable@vger.kernel.org>
Link: http://lkml.kernel.org/r/20200721171936.81563-1-michael.j.ruhl@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-07-24 12:42:42 -07:00
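
To make the contract concrete, here is a minimal caller-side sketch; my_dev,
MY_BAR_BASE and MY_BAR_SIZE are hypothetical stand-ins, not from the patch.
With this fix, both configurations of io_mapping_init_wc return NULL on
failure, so a probe path can bail out instead of faulting on first use of the
mapping:

    static int my_dev_probe(struct my_dev *dev)
    {
            /*
             * Both the fixmap and the ioremap_wc() variants now return
             * NULL when the underlying mapping cannot be created.
             */
            if (!io_mapping_init_wc(&dev->iomap, MY_BAR_BASE, MY_BAR_SIZE))
                    return -ENOMEM; /* propagate instead of crashing later */

            /* ... rest of probe; accesses through dev->iomap are safe ... */
            return 0;
    }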


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2008 Keith Packard <keithp@keithp.com>
 */

#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/page.h>

/*
 * The io_mapping mechanism provides an abstraction for mapping
 * individual pages from an io device to the CPU in an efficient fashion.
 *
 * See Documentation/driver-api/io-mapping.rst
 */

struct io_mapping {
	resource_size_t base;
	unsigned long size;
	pgprot_t prot;
	void __iomem *iomem;
};

#ifdef CONFIG_HAVE_ATOMIC_IOMAP

#include <linux/pfn.h>
#include <asm/iomap.h>
/*
 * For small address space machines, mapping large objects
 * into the kernel virtual space isn't practical. Where
 * available, use fixmap support to dynamically map pages
 * of the object at run time.
 */

static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	pgprot_t prot;

	if (iomap_create_wc(base, size, &prot))
		return NULL;

	iomap->base = base;
	iomap->size = size;
	iomap->prot = prot;
	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}

static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	resource_size_t phys_addr;

	BUG_ON(offset >= mapping->size);
	phys_addr = mapping->base + offset;

	return ioremap_wc(phys_addr, size);
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}

#else

#include <linux/uaccess.h>

/* Create the io_mapping object */
static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping *iomap,
		   resource_size_t base,
		   unsigned long size)
{
	iomap->iomem = ioremap_wc(base, size);
	if (!iomap->iomem)
		return NULL;

	iomap->base = base;
	iomap->size = size;
#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
#elif defined(pgprot_writecombine)
	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
#else
	iomap->prot = pgprot_noncached(PAGE_KERNEL);
#endif

	return iomap;
}

static inline void
io_mapping_fini(struct io_mapping *mapping)
{
	iounmap(mapping->iomem);
}

/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping,
		  unsigned long offset,
		  unsigned long size)
{
	return mapping->iomem + offset;
}

static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}

/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}

static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	io_mapping_unmap(vaddr);
	pagefault_enable();
	preempt_enable();
}

#endif /* HAVE_ATOMIC_IOMAP */

static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,
		     unsigned long size)
{
	struct io_mapping *iomap;

	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
	if (!iomap)
		return NULL;

	if (!io_mapping_init_wc(iomap, base, size)) {
		kfree(iomap);
		return NULL;
	}

	return iomap;
}

static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}

#endif /* _LINUX_IO_MAPPING_H */
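
For reference, a minimal usage sketch of the allocating interface above; the
base, size and offset values are made up, and the error handling relies on the
NULL-on-failure contract this commit fixes:

    /* Hedged sketch: copy one page out of a device aperture. */
    static int copy_one_page(resource_size_t base, unsigned long offset,
                             void *dst)
    {
            struct io_mapping *iomap;
            void __iomem *vaddr;

            iomap = io_mapping_create_wc(base, SZ_1M); /* NULL on failure */
            if (!iomap)
                    return -ENOMEM;

            /* Atomic variant: no sleeping between map and unmap. */
            vaddr = io_mapping_map_atomic_wc(iomap, offset);
            memcpy_fromio(dst, vaddr, PAGE_SIZE);
            io_mapping_unmap_atomic(vaddr);

            io_mapping_free(iomap);
            return 0;
    }

The atomic variant disables preemption (and, on the non-fixmap path, page
faults), so only short non-sleeping accesses belong between map and unmap;
longer-lived mappings should use io_mapping_map_wc() and io_mapping_unmap().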