mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-25 05:34:00 +08:00
2cb7c9cb42
The existing code relies on pagefault_disable() implicitly disabling preemption, so that no schedule will happen between kmap_atomic() and kunmap_atomic(). Let's make this explicit, to prepare for pagefault_disable() not touching preemption anymore. Reviewed-and-tested-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: David.Laight@ACULAB.COM Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: airlied@linux.ie Cc: akpm@linux-foundation.org Cc: benh@kernel.crashing.org Cc: bigeasy@linutronix.de Cc: borntraeger@de.ibm.com Cc: daniel.vetter@intel.com Cc: heiko.carstens@de.ibm.com Cc: herbert@gondor.apana.org.au Cc: hocko@suse.cz Cc: hughd@google.com Cc: mst@redhat.com Cc: paulus@samba.org Cc: ralf@linux-mips.org Cc: schwidefsky@de.ibm.com Cc: yang.shi@windriver.com Link: http://lkml.kernel.org/r/1431359540-32227-5-git-send-email-dahi@linux.vnet.ibm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
171 lines
3.7 KiB
C
171 lines
3.7 KiB
C
/*
|
|
* Copyright © 2008 Keith Packard <keithp@keithp.com>
|
|
*
|
|
* This file is free software; you can redistribute it and/or modify
|
|
* it under the terms of version 2 of the GNU General Public License
|
|
* as published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
*/
|
|
|
|
#ifndef _LINUX_IO_MAPPING_H
|
|
#define _LINUX_IO_MAPPING_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/bug.h>
|
|
#include <asm/io.h>
|
|
#include <asm/page.h>
|
|
|
|
/*
|
|
* The io_mapping mechanism provides an abstraction for mapping
|
|
* individual pages from an io device to the CPU in an efficient fashion.
|
|
*
|
|
* See Documentation/io-mapping.txt
|
|
*/
|
|
|
|
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
|
|
|
|
#include <asm/iomap.h>
|
|
|
|
/*
 * Describes one write-combining I/O region that can be mapped a page
 * at a time via the fixmap-based helpers below.
 */
struct io_mapping {
	resource_size_t base;	/* bus/physical start address of the region */
	unsigned long size;	/* length of the region in bytes */
	pgprot_t prot;		/* protection bits filled in by iomap_create_wc() */
};
|
|
|
|
/*
|
|
* For small address space machines, mapping large objects
|
|
* into the kernel virtual space isn't practical. Where
|
|
* available, use fixmap support to dynamically map pages
|
|
* of the object at run time.
|
|
*/
|
|
|
|
static inline struct io_mapping *
|
|
io_mapping_create_wc(resource_size_t base, unsigned long size)
|
|
{
|
|
struct io_mapping *iomap;
|
|
pgprot_t prot;
|
|
|
|
iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
|
|
if (!iomap)
|
|
goto out_err;
|
|
|
|
if (iomap_create_wc(base, size, &prot))
|
|
goto out_free;
|
|
|
|
iomap->base = base;
|
|
iomap->size = size;
|
|
iomap->prot = prot;
|
|
return iomap;
|
|
|
|
out_free:
|
|
kfree(iomap);
|
|
out_err:
|
|
return NULL;
|
|
}
|
|
|
|
/*
 * io_mapping_free - release a mapping created by io_mapping_create_wc()
 * @mapping: object previously returned by io_mapping_create_wc()
 *
 * Drops the WC reservation for the region and frees the descriptor.
 */
static inline void
io_mapping_free(struct io_mapping *mapping)
{
	iomap_free(mapping->base, mapping->size);
	kfree(mapping);
}
|
|
|
|
/* Atomic map/unmap */
|
|
static inline void __iomem *
|
|
io_mapping_map_atomic_wc(struct io_mapping *mapping,
|
|
unsigned long offset)
|
|
{
|
|
resource_size_t phys_addr;
|
|
unsigned long pfn;
|
|
|
|
BUG_ON(offset >= mapping->size);
|
|
phys_addr = mapping->base + offset;
|
|
pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
|
|
return iomap_atomic_prot_pfn(pfn, mapping->prot);
|
|
}
|
|
|
|
/*
 * io_mapping_unmap_atomic - undo io_mapping_map_atomic_wc()
 * @vaddr: address returned by io_mapping_map_atomic_wc()
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	iounmap_atomic(vaddr);
}
|
|
|
|
/*
 * io_mapping_map_wc - map one page of the region (may sleep)
 * @mapping: region descriptor
 * @offset: byte offset into the region; must be < mapping->size
 *
 * Non-atomic variant: establishes a fresh write-combining mapping of a
 * single page.  Pair with io_mapping_unmap().
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
	BUG_ON(offset >= mapping->size);
	return ioremap_wc(mapping->base + offset, PAGE_SIZE);
}
|
|
|
|
/*
 * io_mapping_unmap - undo io_mapping_map_wc()
 * @vaddr: address returned by io_mapping_map_wc()
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
	iounmap(vaddr);
}
|
|
|
|
#else
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
/* this struct isn't actually defined anywhere */
|
|
struct io_mapping;
|
|
|
|
/* Create the io_mapping object*/
|
|
static inline struct io_mapping *
|
|
io_mapping_create_wc(resource_size_t base, unsigned long size)
|
|
{
|
|
return (struct io_mapping __force *) ioremap_wc(base, size);
|
|
}
|
|
|
|
/*
 * io_mapping_free - tear down the whole-region mapping
 * @mapping: opaque pointer returned by io_mapping_create_wc()
 *
 * The pointer is really the ioremap'd base address, so just iounmap it.
 */
static inline void
io_mapping_free(struct io_mapping *mapping)
{
	iounmap((void __force __iomem *) mapping);
}
|
|
|
|
/* Atomic map/unmap */
|
|
/* Atomic map/unmap */

/*
 * io_mapping_map_atomic_wc - "map" a page while in an atomic section
 * @mapping: opaque pointer holding the ioremap'd base address
 * @offset: byte offset into the region
 *
 * The region is already fully mapped, so this only needs pointer
 * arithmetic — but callers rely on atomic-section semantics, so
 * explicitly disable preemption as well as pagefaults.  Preemption is
 * disabled explicitly (not implicitly via pagefault_disable()) so that
 * pagefault_disable() may stop touching preemption in the future.
 * Order matters: this must be unwound in reverse by
 * io_mapping_unmap_atomic().
 */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
			 unsigned long offset)
{
	preempt_disable();
	pagefault_disable();
	return ((char __force __iomem *) mapping) + offset;
}
|
|
|
|
/*
 * io_mapping_unmap_atomic - leave the atomic section opened by
 * io_mapping_map_atomic_wc()
 * @vaddr: address returned by io_mapping_map_atomic_wc() (unused here)
 *
 * Re-enables pagefaults and preemption in the reverse order of the
 * map side; nothing else to undo since no real mapping was created.
 */
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
	pagefault_enable();
	preempt_enable();
}
|
|
|
|
/* Non-atomic map/unmap */
|
|
/* Non-atomic map/unmap */

/*
 * io_mapping_map_wc - address one page of the already-mapped region
 * @mapping: opaque pointer holding the ioremap'd base address
 * @offset: byte offset into the region
 *
 * Pure pointer arithmetic: the whole region was mapped at creation
 * time, so no new mapping is needed and nothing must be torn down.
 */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
	char __iomem *base = (char __force __iomem *) mapping;

	return base + offset;
}
|
|
|
|
/*
 * io_mapping_unmap - no-op counterpart of io_mapping_map_wc()
 * @vaddr: address returned by io_mapping_map_wc()
 *
 * Intentionally empty: the fallback map side performed no per-page
 * mapping, so there is nothing to release until io_mapping_free().
 */
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
|
|
|
|
#endif /* HAVE_ATOMIC_IOMAP */
|
|
|
|
#endif /* _LINUX_IO_MAPPING_H */
|