1d4454e7ce
The current PowerPC code makes pci_unmap_addr(), pci_unmap_addr_set(), and friends trivial for all 32-bit kernels. This is reasonable, since for those kernels it is true that pci_unmap_single() does not need the DMA address from the original DMA mapping -- in fact, it is a NOP.

However, I recently tried the tg3 driver on a PowerPC 440SPe machine, which runs a 32-bit kernel and has non-cache-coherent PCI DMA. I found that the tg3 driver crashed in pci_dma_sync_single_for_cpu(), since on non-coherent systems that function must invalidate the cache for the DMA address range requested, and therefore it does use the address passed in. tg3 uses a DMA address it stashes away with pci_unmap_addr_set() and retrieves with pci_unmap_addr(). Of course, since pci_unmap_addr() is defined to (0) right now, this doesn't work.

It seems to me that the tg3 driver is using pci_unmap_addr() in a legitimate way -- I wouldn't want to have to teach all drivers that they may only use pci_unmap_addr() when they need the address for the unmapping functions, and that if they also want the pci_dma_sync functions they have to store the DMA address without the helper macros.

The right fix therefore seems to be in the definition of the macros in <asm/pci.h>: use the trivial versions only for 32-bit kernels on cache-coherent systems, and the real versions for both 64-bit kernels and non-coherent systems.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
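To make the dependency concrete, here is a minimal sketch of the driver-side pattern in question (the struct and function names are hypothetical, loosely modeled on how tg3 stashes its receive-buffer mappings; this is not code from the tree). The address saved with pci_unmap_addr_set() at map time is read back with pci_unmap_addr() not only for the eventual pci_unmap_single() but also for pci_dma_sync_single_for_cpu(), and the sync is exactly the call that needs a real address on a non-coherent 32-bit system:

	#include <linux/pci.h>

	struct rx_buf {
		void *buf;			/* CPU address of the receive buffer */
		DECLARE_PCI_UNMAP_ADDR(mapping)	/* empty on coherent 32-bit kernels */
		DECLARE_PCI_UNMAP_LEN(len)
	};

	/* Map time: rp->buf is assumed to already point at the packet buffer;
	 * stash the DMA address and length via the helper macros. */
	static void rx_buf_map(struct pci_dev *pdev, struct rx_buf *rp, size_t size)
	{
		dma_addr_t dma = pci_map_single(pdev, rp->buf, size, PCI_DMA_FROMDEVICE);

		pci_unmap_addr_set(rp, mapping, dma);
		pci_unmap_len_set(rp, len, size);
	}

	/* Completion time: the stashed address feeds the cache sync, not just the unmap. */
	static void rx_buf_complete(struct pci_dev *pdev, struct rx_buf *rp)
	{
		pci_dma_sync_single_for_cpu(pdev, pci_unmap_addr(rp, mapping),
					    pci_unmap_len(rp, len), PCI_DMA_FROMDEVICE);
		/* ... look at rp->buf, then tear the mapping down ... */
		pci_unmap_single(pdev, pci_unmap_addr(rp, mapping),
				 pci_unmap_len(rp, len), PCI_DMA_FROMDEVICE);
	}

With the trivial 32-bit definitions, pci_unmap_addr(rp, mapping) and pci_unmap_len(rp, len) both evaluate to 0, so the sync call above is handed a bogus address -- which is the failure described above -- even though the no-op unmap would have been harmless.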
255 lines
6.9 KiB
C
#ifndef __ASM_POWERPC_PCI_H
#define __ASM_POWERPC_PCI_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/machdep.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>

#include <asm-generic/pci-dma-compat.h>

#define PCIBIOS_MIN_IO		0x1000
#define PCIBIOS_MIN_MEM		0x10000000

struct pci_dev;

/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

/*
 * Set this to 1 if you want the kernel to re-assign all PCI
 * bus numbers
 */
extern int pci_assign_all_buses;
#define pcibios_assign_all_busses()	(pci_assign_all_buses)

#define pcibios_scan_all_fns(a, b)	0

static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	if (ppc_md.pci_get_legacy_ide_irq)
		return ppc_md.pci_get_legacy_ide_irq(dev, channel);
	return channel ? 15 : 14;
}

#ifdef CONFIG_PPC64

/*
 * We want to avoid touching the cacheline size or MWI bit.
 * pSeries firmware sets the cacheline size (which is not the cpu cacheline
 * size in all cases) and hardware treats MWI the same as memory write.
 */
#define PCI_DISABLE_MWI

extern struct dma_mapping_ops *pci_dma_ops;

/* For DAC DMA, we currently don't support it by default, but
 * we let 64-bit platforms override this.
 */
static inline int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	if (pci_dma_ops && pci_dma_ops->dac_dma_supported)
		return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask);
	return 0;
}

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#endif

extern int pci_domain_nr(struct pci_bus *bus);

/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);

#else /* 32-bit */

#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}
#endif

/*
 * At present there are very few 32-bit PPC machines that can have
 * memory above the 4GB point, and we don't support that.
 */
#define pci_dac_dma_supported(pci_dev, mask)	(0)

/* Return the index of the PCI controller for device PDEV. */
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index

/* Set the name of the bus as it appears in /proc/bus/pci */
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine);

/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP	1

#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
/*
 * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
 * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
 * so on are not nops.
 * and thus...
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))

#else /* 32-bit && coherent */

/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)		(0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

/* The PCI address space does not equal the physical memory address
 * space (we have an IOMMU).  The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(0)

#else /* 32-bit */

/* The PCI address space does equal the physical memory
 * address space (no IOMMU).  The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS	(1)

#endif /* CONFIG_PPC64 */

extern void pcibios_resource_to_bus(struct pci_dev *dev,
				    struct pci_bus_region *region,
				    struct resource *res);

extern void pcibios_bus_to_resource(struct pci_dev *dev,
				    struct resource *res,
				    struct pci_bus_region *region);

static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
						   struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}

extern int unmap_bus_range(struct pci_bus *bus);

extern int remap_bus_range(struct pci_bus *bus);

extern void pcibios_fixup_device_resources(struct pci_dev *dev,
					   struct pci_bus *bus);

extern void pcibios_setup_new_device(struct pci_dev *dev);

extern void pcibios_claim_one_bus(struct pci_bus *b);

extern struct pci_controller *init_phb_dynamic(struct device_node *dn);

extern struct pci_dev *of_create_pci_dev(struct device_node *node,
					 struct pci_bus *bus, int devfn);

extern void of_scan_pci_bridge(struct device_node *node,
			       struct pci_dev *dev);

extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);

extern int pci_read_irq_line(struct pci_dev *dev);

extern void pcibios_add_platform_entries(struct pci_dev *dev);

struct file;
extern pgprot_t pci_phys_mem_access_prot(struct file *file,
					 unsigned long pfn,
					 unsigned long size,
					 pgprot_t prot);

#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end);

#endif	/* __KERNEL__ */
#endif	/* __ASM_POWERPC_PCI_H */