[PATCH] sort the devres mess out
* Split the implementation-agnostic stuff into separate files.
* Make sure that targets using a non-default request_irq() pull in kernel/irq/devres.o.
* Introduce new symbols (HAS_IOPORT and HAS_IOMEM) defaulting to positive; allow architectures to turn them off (we needed these symbols anyway for the dependencies of quite a few drivers).
* Protect the ioport-related parts of lib/devres.o with CONFIG_HAS_IOPORT.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 5ea8176994
parent 2835fdfa4a
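For orientation, a hedged sketch (not part of the patch) of how a driver ends up consuming the managed helpers whose implementations this commit shuffles between files. The foo_* names and the platform_device wiring are illustrative assumptions only; the point is that neither the IRQ nor the mapping needs explicit unwinding, because devres releases both on driver detach.

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/* acknowledge the (hypothetical) hardware here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);
	void __iomem *regs;
	int rc;

	if (!res || irq < 0)
		return -ENODEV;

	/* mapping is torn down automatically on driver detach */
	regs = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1);
	if (!regs)
		return -ENOMEM;

	/* IRQ is freed automatically on driver detach */
	rc = devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", pdev);
	if (rc)
		return rc;

	return 0;	/* no error unwinding needed for the managed resources */
}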
@@ -29,6 +29,10 @@ config MMU
 	bool
 	default y
 
+config NO_IOPORT
+	bool
+	default n
+
 config EISA
 	bool
 	---help---
@@ -298,6 +302,7 @@ config ARCH_RPC
 	select TIMER_ACORN
 	select ARCH_MAY_HAVE_PC_FDC
 	select ISA_DMA_API
+	select NO_IOPORT
 	help
 	  On the Acorn Risc-PC, Linux can support the internal IDE disk and
 	  CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -44,6 +44,9 @@ config IRQ_PER_CPU
 	bool
 	default y
 
+config NO_IOPORT
+	def_bool y
+
 config CRIS
 	bool
 	default y
@@ -57,6 +57,9 @@ config TIME_LOW_RES
 	bool
 	default y
 
+config NO_IOPORT
+	def_bool y
+
 config ISA
 	bool
 	default y
@@ -6,6 +6,8 @@ extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o ints.o \
	 sys_h8300.o time.o semaphore.o signal.o \
-	 setup.o gpio.o init_task.o syscalls.o
+	 setup.o gpio.o init_task.o syscalls.o devres.o
+
+devres-y = ../../../kernel/irq/devres.o
 
 obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o
@@ -28,6 +28,9 @@ config GENERIC_IRQ_PROBE
 	bool
 	default y
 
+config NO_IOPORT
+	def_bool y
+
 source "init/Kconfig"
@@ -42,6 +42,9 @@ config ARCH_MAY_HAVE_PC_FDC
 	depends on Q40 || (BROKEN && SUN3X)
 	default y
 
+config NO_IOPORT
+	def_bool y
+
 mainmenu "Linux/68k Kernel Configuration"
 
 source "init/Kconfig"
@@ -10,7 +10,9 @@ endif
 extra-y += vmlinux.lds
 
 obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \
-	 sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o
+	 sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
+
+devres-y = ../../../kernel/irq/devres.o
 
 obj-$(CONFIG_PCI) += bios32.o
 obj-$(CONFIG_MODULES) += module.o
@@ -53,6 +53,9 @@ config TIME_LOW_RES
 	bool
 	default y
 
+config NO_IOPORT
+	def_bool y
+
 source "init/Kconfig"
 
 menu "Processor type and features"
@@ -41,6 +41,9 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
 	def_bool y
 
+config NO_IOPORT
+	def_bool y
+
 mainmenu "Linux Kernel Configuration"
 
 config S390
@@ -12,7 +12,9 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
	    sys_sparc.o sunos_asm.o systbls.o \
	    time.o windows.o cpu.o devices.o sclow.o \
	    tadpole.o tick14.o ptrace.o sys_solaris.o \
-	    unaligned.o muldiv.o semaphore.o prom.o of_device.o
+	    unaligned.o muldiv.o semaphore.o prom.o of_device.o devres.o
+
+devres-y = ../../../kernel/irq/devres.o
 
 obj-$(CONFIG_PCI) += pcic.o
 obj-$(CONFIG_SUN4) += sun4setup.o
@@ -16,6 +16,9 @@ config MMU
 	bool
 	default y
 
+config NO_IOMEM
+	def_bool y
+
 mainmenu "Linux/Usermode Kernel Configuration"
 
 config ISA
@@ -46,6 +46,9 @@ config ARCH_HAS_ILOG2_U64
 	bool
 	default n
 
+config NO_IOPORT
+	def_bool y
+
 source "init/Kconfig"
 
 menu "Processor type and features"
@@ -43,12 +43,6 @@ void __iomem * devm_ioremap_nocache(struct device *dev, unsigned long offset,
				    unsigned long size);
 void devm_iounmap(struct device *dev, void __iomem *addr);
 
-void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
-void __iomem * const * pcim_iomap_table(struct pci_dev *pdev);
-
-int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name);
-
 /**
  * check_signature - find BIOS signatures
  * @io_addr: mmio address to check
@@ -840,6 +840,11 @@ enum pci_fixup_pass {
 
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
 
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev);
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name);
+
 extern int pci_pci_problems;
 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
 #define PCIPCI_TRITON		2
@@ -1,5 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o resend.o chip.o
+obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
kernel/irq/devres.c (new file, 88 lines)
@@ -0,0 +1,88 @@
#include <linux/module.h>
#include <linux/interrupt.h>

/*
 * Device resource management aware IRQ request/free implementation.
 */
struct irq_devres {
	unsigned int irq;
	void *dev_id;
};

static void devm_irq_release(struct device *dev, void *res)
{
	struct irq_devres *this = res;

	free_irq(this->irq, this->dev_id);
}

static int devm_irq_match(struct device *dev, void *res, void *data)
{
	struct irq_devres *this = res, *match = data;

	return this->irq == match->irq && this->dev_id == match->dev_id;
}

/**
 * devm_request_irq - allocate an interrupt line for a managed device
 * @dev: device to request interrupt for
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * Except for the extra @dev argument, this function takes the
 * same arguments and performs the same function as
 * request_irq().  IRQs requested with this function will be
 * automatically freed on driver detach.
 *
 * If an IRQ allocated with this function needs to be freed
 * separately, devm_free_irq() must be used.
 */
int devm_request_irq(struct device *dev, unsigned int irq,
		     irq_handler_t handler, unsigned long irqflags,
		     const char *devname, void *dev_id)
{
	struct irq_devres *dr;
	int rc;

	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
			  GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rc = request_irq(irq, handler, irqflags, devname, dev_id);
	if (rc) {
		kfree(dr);
		return rc;
	}

	dr->irq = irq;
	dr->dev_id = dev_id;
	devres_add(dev, dr);

	return 0;
}
EXPORT_SYMBOL(devm_request_irq);

/**
 * devm_free_irq - free an interrupt
 * @dev: device to free interrupt for
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Except for the extra @dev argument, this function takes the
 * same arguments and performs the same function as free_irq().
 * This function instead of free_irq() should be used to manually
 * free IRQs allocated with devm_request_irq().
 */
void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
	struct irq_devres match_data = { irq, dev_id };

	free_irq(irq, dev_id);
	WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
			       &match_data));
}
EXPORT_SYMBOL(devm_free_irq);
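The devres_alloc()/devres_add()/devres_destroy() pattern above is not IRQ-specific. As a hedged illustration (not part of this patch), the same shape can wrap any other acquire/release pair; a hypothetical managed clk_get(), with the devm_clk_* names being assumptions for the example:

#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>

static void devm_clk_release(struct device *dev, void *res)
{
	/* runs automatically on driver detach */
	clk_put(*(struct clk **)res);
}

struct clk *devm_clk_get(struct device *dev, const char *id)
{
	struct clk **ptr, *clk;

	ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	clk = clk_get(dev, id);
	if (!IS_ERR(clk)) {
		*ptr = clk;
		devres_add(dev, ptr);	/* register the release callback */
	} else {
		devres_free(ptr);	/* acquire failed, drop the devres entry */
	}
	return clk;
}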
@@ -482,89 +482,3 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	return retval;
 }
 EXPORT_SYMBOL(request_irq);
-
-/*
- * Device resource management aware IRQ request/free implementation.
- */
-struct irq_devres {
-	unsigned int irq;
-	void *dev_id;
-};
-
-static void devm_irq_release(struct device *dev, void *res)
-{
-	struct irq_devres *this = res;
-
-	free_irq(this->irq, this->dev_id);
-}
-
-static int devm_irq_match(struct device *dev, void *res, void *data)
-{
-	struct irq_devres *this = res, *match = data;
-
-	return this->irq == match->irq && this->dev_id == match->dev_id;
-}
-
-/**
- * devm_request_irq - allocate an interrupt line for a managed device
- * @dev: device to request interrupt for
- * @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
- * @irqflags: Interrupt type flags
- * @devname: An ascii name for the claiming device
- * @dev_id: A cookie passed back to the handler function
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as
- * request_irq().  IRQs requested with this function will be
- * automatically freed on driver detach.
- *
- * If an IRQ allocated with this function needs to be freed
- * separately, dev_free_irq() must be used.
- */
-int devm_request_irq(struct device *dev, unsigned int irq,
-		     irq_handler_t handler, unsigned long irqflags,
-		     const char *devname, void *dev_id)
-{
-	struct irq_devres *dr;
-	int rc;
-
-	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
-			  GFP_KERNEL);
-	if (!dr)
-		return -ENOMEM;
-
-	rc = request_irq(irq, handler, irqflags, devname, dev_id);
-	if (rc) {
-		kfree(dr);
-		return rc;
-	}
-
-	dr->irq = irq;
-	dr->dev_id = dev_id;
-	devres_add(dev, dr);
-
-	return 0;
-}
-EXPORT_SYMBOL(devm_request_irq);
-
-/**
- * devm_free_irq - free an interrupt
- * @dev: device to free interrupt for
- * @irq: Interrupt line to free
- * @dev_id: Device identity to free
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as free_irq().
- * This function instead of free_irq() should be used to manually
- * free IRQs allocated with dev_request_irq().
- */
-void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
-{
-	struct irq_devres match_data = { irq, dev_id };
-
-	free_irq(irq, dev_id);
-	WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
-			       &match_data));
-}
-EXPORT_SYMBOL(devm_free_irq);
@@ -101,9 +101,14 @@ config TEXTSEARCH_FSM
 config PLIST
	boolean
 
-config IOMAP_COPY
+config HAS_IOMEM
	boolean
-	depends on !UML
+	depends on !NO_IOMEM
	default y
 
+config HAS_IOPORT
+	boolean
+	depends on HAS_IOMEM && !NO_IOPORT
+	default y
+
 endmenu
@@ -12,15 +12,15 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y += kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o iomap.o \
-	 bust_spinlocks.o
+obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
-obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
+obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib/devres.c (new file, 300 lines)
@@ -0,0 +1,300 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/module.h>

static void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
}
EXPORT_SYMBOL(devm_iounmap);

#ifdef CONFIG_HAS_IOPORT
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @dev.  If iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 */
void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_region;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_iomap;
	}

	return 0;

 err_iomap:
	pcim_iounmap(pdev, iomap[i]);
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
#endif
#endif
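A hedged usage sketch (not part of the patch) of the pcim_* helpers above in a hypothetical PCI driver probe. The bar_example names, the choice of BAR 0, and pcim_enable_device() (which comes from the companion devres patches, not from this one) are assumptions for illustration; the BARs requested and mapped here are released automatically on driver detach.

#include <linux/pci.h>

#define BAR_REGS	0	/* hypothetical register BAR */

static int bar_example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* request + iomap BAR 0; both are undone automatically on detach */
	rc = pcim_iomap_regions(pdev, 1 << BAR_REGS, "bar_example");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[BAR_REGS];
	if (!regs)
		return -ENOMEM;

	/* ... program the device through 'regs' ... */
	return 0;	/* no pci_release_region()/pci_iounmap() unwinding needed */
}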
lib/iomap.c
@@ -6,7 +6,6 @@
 #include <linux/pci.h>
 #include <linux/io.h>
 
-#ifdef CONFIG_GENERIC_IOMAP
 #include <linux/module.h>
 
 /*
@@ -256,298 +255,3 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 }
 EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
-
-#endif /* CONFIG_GENERIC_IOMAP */
-
-/*
- * Generic iomap devres
- */
-static void devm_ioport_map_release(struct device *dev, void *res)
-{
-	ioport_unmap(*(void __iomem **)res);
-}
-
-static int devm_ioport_map_match(struct device *dev, void *res,
-				 void *match_data)
-{
-	return *(void **)res == match_data;
-}
-
-/**
- * devm_ioport_map - Managed ioport_map()
- * @dev: Generic device to map ioport for
- * @port: Port to map
- * @nr: Number of ports to map
- *
- * Managed ioport_map().  Map is automatically unmapped on driver
- * detach.
- */
-void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
-			       unsigned int nr)
-{
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioport_map(port, nr);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
-}
-EXPORT_SYMBOL(devm_ioport_map);
-
-/**
- * devm_ioport_unmap - Managed ioport_unmap()
- * @dev: Generic device to unmap for
- * @addr: Address to unmap
- *
- * Managed ioport_unmap().  @addr must have been mapped using
- * devm_ioport_map().
- */
-void devm_ioport_unmap(struct device *dev, void __iomem *addr)
-{
-	ioport_unmap(addr);
-	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
-			       devm_ioport_map_match, (void *)addr));
-}
-EXPORT_SYMBOL(devm_ioport_unmap);
-
-static void devm_ioremap_release(struct device *dev, void *res)
-{
-	iounmap(*(void __iomem **)res);
-}
-
-static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
-{
-	return *(void **)res == match_data;
-}
-
-/**
- * devm_ioremap - Managed ioremap()
- * @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
- * @size: Size of map
- *
- * Managed ioremap().  Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
-			   unsigned long size)
-{
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioremap(offset, size);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
-}
-EXPORT_SYMBOL(devm_ioremap);
-
-/**
- * devm_ioremap_nocache - Managed ioremap_nocache()
- * @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
- * @size: Size of map
- *
- * Managed ioremap_nocache().  Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
-				   unsigned long size)
-{
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioremap_nocache(offset, size);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
-}
-EXPORT_SYMBOL(devm_ioremap_nocache);
-
-/**
- * devm_iounmap - Managed iounmap()
- * @dev: Generic device to unmap for
- * @addr: Address to unmap
- *
- * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
- */
-void devm_iounmap(struct device *dev, void __iomem *addr)
-{
-	iounmap(addr);
-	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
-			       (void *)addr));
-}
-EXPORT_SYMBOL(devm_iounmap);
-
-/*
- * PCI iomap devres
- */
-#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE
-
-struct pcim_iomap_devres {
-	void __iomem *table[PCIM_IOMAP_MAX];
-};
-
-static void pcim_iomap_release(struct device *gendev, void *res)
-{
-	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
-	struct pcim_iomap_devres *this = res;
-	int i;
-
-	for (i = 0; i < PCIM_IOMAP_MAX; i++)
-		if (this->table[i])
-			pci_iounmap(dev, this->table[i]);
-}
-
-/**
- * pcim_iomap_table - access iomap allocation table
- * @pdev: PCI device to access iomap table for
- *
- * Access iomap allocation table for @dev.  If iomap table doesn't
- * exist and @pdev is managed, it will be allocated.  All iomaps
- * recorded in the iomap table are automatically unmapped on driver
- * detach.
- *
- * This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succed once
- * allocated.
- */
-void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
-{
-	struct pcim_iomap_devres *dr, *new_dr;
-
-	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
-	if (dr)
-		return dr->table;
-
-	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
-	if (!new_dr)
-		return NULL;
-	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
-	return dr->table;
-}
-EXPORT_SYMBOL(pcim_iomap_table);
-
-/**
- * pcim_iomap - Managed pcim_iomap()
- * @pdev: PCI device to iomap for
- * @bar: BAR to iomap
- * @maxlen: Maximum length of iomap
- *
- * Managed pci_iomap().  Map is automatically unmapped on driver
- * detach.
- */
-void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
-{
-	void __iomem **tbl;
-
-	BUG_ON(bar >= PCIM_IOMAP_MAX);
-
-	tbl = (void __iomem **)pcim_iomap_table(pdev);
-	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
-		return NULL;
-
-	tbl[bar] = pci_iomap(pdev, bar, maxlen);
-	return tbl[bar];
-}
-EXPORT_SYMBOL(pcim_iomap);
-
-/**
- * pcim_iounmap - Managed pci_iounmap()
- * @pdev: PCI device to iounmap for
- * @addr: Address to unmap
- *
- * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
- */
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
-{
-	void __iomem **tbl;
-	int i;
-
-	pci_iounmap(pdev, addr);
-
-	tbl = (void __iomem **)pcim_iomap_table(pdev);
-	BUG_ON(!tbl);
-
-	for (i = 0; i < PCIM_IOMAP_MAX; i++)
-		if (tbl[i] == addr) {
-			tbl[i] = NULL;
-			return;
-		}
-	WARN_ON(1);
-}
-EXPORT_SYMBOL(pcim_iounmap);
-
-/**
- * pcim_iomap_regions - Request and iomap PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
- *
- * Request and iomap regions specified by @mask.
- */
-int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
-{
-	void __iomem * const *iomap;
-	int i, rc;
-
-	iomap = pcim_iomap_table(pdev);
-	if (!iomap)
-		return -ENOMEM;
-
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-		unsigned long len;
-
-		if (!(mask & (1 << i)))
-			continue;
-
-		rc = -EINVAL;
-		len = pci_resource_len(pdev, i);
-		if (!len)
-			goto err_inval;
-
-		rc = pci_request_region(pdev, i, name);
-		if (rc)
-			goto err_region;
-
-		rc = -ENOMEM;
-		if (!pcim_iomap(pdev, i, 0))
-			goto err_iomap;
-	}
-
-	return 0;
-
- err_iomap:
-	pcim_iounmap(pdev, iomap[i]);
- err_region:
-	pci_release_region(pdev, i);
- err_inval:
-	while (--i >= 0) {
-		pcim_iounmap(pdev, iomap[i]);
-		pci_release_region(pdev, i);
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions);