
Merge branch 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze

* 'for-linus' of git://git.monstr.eu/linux-2.6-microblaze: (27 commits)
  microblaze: entry.S use delay slot for return handlers
  microblaze: Save current task directly
  microblaze: Simplify entry.S - save/restore r3/r4 - ret_from_trap
  microblaze: PCI early support for noMMU system
  microblaze: Fix dma alloc and free coherent dma functions
  microblaze: Add consistent code
  microblaze: pgtable.h: move consistent functions
  microblaze: Remove ancient Kconfig option for consistent mapping
  microblaze: Remove VMALLOC_VMADDR
  microblaze: Add define for ASM_LOOP
  microblaze: Preliminary support for dma drivers
  microblaze: remove trailing space in messages
  microblaze: Use generic show_mem()
  microblaze: Change temp register for cmdline
  microblaze: Preliminary support for dma drivers
  microblaze: Move cache function to cache.c
  microblaze: Add support from PREEMPT
  microblaze: Add support for Xilinx PCI host bridge
  microblaze: Enable PCI, missing files
  microblaze: Add core PCI files
  ...
Linus Torvalds 2010-03-18 16:57:24 -07:00
commit 722874465e
33 changed files with 3841 additions and 196 deletions

View File

@ -14,6 +14,8 @@ config MICROBLAZE
select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_OPROFILE
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select TRACING_SUPPORT
config SWAP
@ -76,9 +78,6 @@ config HAVE_LATENCYTOP_SUPPORT
config PCI
def_bool n
config NO_DMA
def_bool y
config DTC
def_bool y
@ -146,7 +145,6 @@ menu "Advanced setup"
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
depends on MMU
help
This option will enable prompting for a variety of advanced kernel
configuration options. These options can cause the kernel to not
@ -158,6 +156,15 @@ config ADVANCED_OPTIONS
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
config XILINX_UNCACHED_SHADOW
bool "Are you using uncached shadow for RAM ?"
depends on ADVANCED_OPTIONS && !MMU
default n
help
This is needed to be able to allocate uncacheable memory regions.
The feature requires the design to define the RAM memory controller
window to be twice as large as the actual physical memory.
config HIGHMEM_START_BOOL
bool "Set high memory pool address"
depends on ADVANCED_OPTIONS && HIGHMEM
@ -175,7 +182,7 @@ config HIGHMEM_START
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
depends on ADVANCED_OPTIONS
depends on ADVANCED_OPTIONS && MMU
help
This option allows you to set the maximum amount of memory which
will be used as "low memory", that is, memory which the kernel can
@ -187,7 +194,6 @@ config LOWMEM_SIZE_BOOL
config LOWMEM_SIZE
hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
depends on MMU
default "0x30000000"
config KERNEL_START_BOOL
@ -208,7 +214,7 @@ config KERNEL_START
config TASK_SIZE_BOOL
bool "Set custom user task size"
depends on ADVANCED_OPTIONS
depends on ADVANCED_OPTIONS && MMU
help
This option allows you to set the amount of virtual address space
allocated to user tasks. This can be useful in optimizing the
@ -218,35 +224,8 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
depends on MMU
default "0x80000000"
config CONSISTENT_START_BOOL
bool "Set custom consistent memory pool address"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the base virtual address
of the consistent memory pool. This pool of virtual
memory is used to make consistent memory allocations.
config CONSISTENT_START
hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
depends on MMU
default "0xff100000" if NOT_COHERENT_CACHE
config CONSISTENT_SIZE_BOOL
bool "Set custom consistent memory pool size"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the size of the
consistent memory pool. This pool of virtual memory
is used to make consistent memory allocations.
config CONSISTENT_SIZE
hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
depends on MMU
default "0x00200000" if NOT_COHERENT_CACHE
endmenu
source "mm/Kconfig"
@ -257,6 +236,25 @@ source "fs/Kconfig.binfmt"
endmenu
menu "Bus Options"
config PCI
bool "PCI support"
config PCI_DOMAINS
def_bool PCI
config PCI_SYSCALL
def_bool PCI
config PCI_XILINX
bool "Xilinx PCI host bridge support"
depends on PCI
source "drivers/pci/Kconfig"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"

View File

@ -50,6 +50,7 @@ libs-y += $(LIBGCC)
core-y += arch/microblaze/kernel/
core-y += arch/microblaze/mm/
core-y += arch/microblaze/platform/
core-$(CONFIG_PCI) += arch/microblaze/pci/
drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/

View File

@ -14,6 +14,10 @@ struct device_node;
struct dev_archdata {
/* Optional pointer to an OF device node */
struct device_node *of_node;
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
void *dma_data;
};
struct pdev_archdata {

View File

@ -1 +1,153 @@
#include <asm-generic/dma-mapping-broken.h>
/*
* Implements the generic device dma API for microblaze and the pci
*
* Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2009-2010 PetaLogix
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
* This file is based on the powerpc and x86 dma-mapping.h versions
* Copyright (C) 2004 IBM
*/
#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H
#define _ASM_MICROBLAZE_DMA_MAPPING_H
/*
* See Documentation/PCI/PCI-DMA-mapping.txt and
* Documentation/DMA-API.txt for documentation.
*/
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) ((void)0)
#define __dma_sync(addr, size, rw) ((void)0)
static inline unsigned long device_to_mask(struct device *dev)
{
if (dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
/* Assume devices without mask can take 32 bit addresses */
return 0xfffffffful;
}
extern struct dma_map_ops *dma_ops;
/*
* Available generic sets of operations
*/
extern struct dma_map_ops dma_direct_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
/* We don't handle the NULL dev case for ISA for now. We could
* do it via an out of line call but it is not needed for now. The
* only ISA DMA device we support is the floppy and we have a hack
* in the floppy driver directly to get a device for us.
*/
if (unlikely(!dev) || !dev->archdata.dma_ops)
return NULL;
return dev->archdata.dma_ops;
}
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
dev->archdata.dma_ops = ops;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (unlikely(!ops))
return 0;
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
}
#ifdef CONFIG_PCI
/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK
#endif
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (unlikely(ops == NULL))
return -EIO;
if (ops->set_dma_mask)
return ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
#include <asm-generic/dma-mapping-common.h>
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
return (dma_addr == DMA_ERROR_CODE);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
BUG_ON(!ops);
memory = ops->alloc_coherent(dev, size, dma_handle, flag);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
static inline int dma_get_cache_alignment(void)
{
return L1_CACHE_BYTES;
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
__dma_sync(vaddr, size, (int)direction);
}
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
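A usage sketch, not part of the commit: a driver built on this header allocates through the ops dispatch above. The names my_probe and MY_BUF_SIZE are hypothetical.

#include <linux/device.h>
#include <linux/dma-mapping.h>

#define MY_BUF_SIZE 4096 /* hypothetical buffer size */

static int my_probe(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* dispatched to get_dma_ops(dev)->alloc_coherent() */
	buf = dma_alloc_coherent(dev, MY_BUF_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... hand "handle" to the device, access "buf" from the CPU ... */

	dma_free_coherent(dev, MY_BUF_SIZE, buf, handle);
	return 0;
}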

View File

@ -15,7 +15,23 @@
#include <asm/page.h>
#include <linux/types.h>
#include <linux/mm.h> /* Get struct page {...} */
#include <asm-generic/iomap.h>
#ifndef CONFIG_PCI
#define _IO_BASE 0
#define _ISA_MEM_BASE 0
#define PCI_DRAM_OFFSET 0
#else
#define _IO_BASE isa_io_base
#define _ISA_MEM_BASE isa_mem_base
#define PCI_DRAM_OFFSET pci_dram_offset
#endif
extern unsigned long isa_io_base;
extern unsigned long pci_io_base;
extern unsigned long pci_dram_offset;
extern resource_size_t isa_mem_base;
#define IO_SPACE_LIMIT (0xFFFFFFFF)
@ -124,9 +140,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
#define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr))
#define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr))
#define __page_address(page) \
(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
#define page_to_bus(page) (page_to_phys(page))
#define bus_to_virt(addr) (phys_to_virt(addr))
@ -227,15 +240,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
#define out_8(a, v) __raw_writeb((v), (a))
#define in_8(a) __raw_readb(a)
/* FIXME */
static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
{
return (void __iomem *) (port);
}
static inline void ioport_unmap(void __iomem *addr)
{
/* Nothing to do */
}
#define ioport_map(port, nr) ((void __iomem *)(port))
#define ioport_unmap(addr)
#endif /* _ASM_MICROBLAZE_IO_H */

View File

@ -14,6 +14,12 @@
#include <linux/interrupt.h>
/* This type is the placeholder for a hardware interrupt number. It has to
* be big enough to enclose whatever representation is used by a given
* platform.
*/
typedef unsigned long irq_hw_number_t;
extern unsigned int nr_irq;
#define NO_IRQ (-1)
@ -21,7 +27,8 @@ extern unsigned int nr_irq;
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
/**
* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
@ -40,4 +47,32 @@ static inline void irq_dispose_mapping(unsigned int virq)
return;
}
struct irq_host;
/**
* irq_create_mapping - Map a hardware interrupt into linux virq space
* @host: host owning this hardware interrupt or NULL for default host
* @hwirq: hardware irq number in that host space
*
* Only one mapping per hardware interrupt is permitted. Returns a linux
* virq number.
* If the sense/trigger is to be specified, set_irq_type() should be called
* on the number returned from that call.
*/
extern unsigned int irq_create_mapping(struct irq_host *host,
irq_hw_number_t hwirq);
/**
* irq_create_of_mapping - Map a hardware interrupt into linux virq space
* @controller: Device node of the interrupt controller
* @inspec: Interrupt specifier from the device-tree
* @intsize: Size of the interrupt specifier from the device-tree
*
* This function is identical to irq_create_mapping except that it takes
* its input straight from the device tree (typically the results
* of the of_irq_map_*() functions).
*/
extern unsigned int irq_create_of_mapping(struct device_node *controller,
u32 *intspec, unsigned int intsize);
#endif /* _ASM_MICROBLAZE_IRQ_H */
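A usage sketch, not part of the commit: a driver maps its hardware line to a Linux virq before requesting it. MY_HWIRQ and my_handler are hypothetical; with the 1:1 implementation this merge adds, the returned virq simply equals the hwirq.

#include <linux/interrupt.h>
#include <asm/irq.h>

#define MY_HWIRQ 3 /* hypothetical hardware line */

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* acknowledge the device here */
	return IRQ_HANDLED;
}

static int my_attach(void *dev_id)
{
	/* NULL host selects the default controller */
	unsigned int virq = irq_create_mapping(NULL, MY_HWIRQ);

	if (virq == NO_IRQ)
		return -ENODEV;
	return request_irq(virq, my_handler, 0, "mydev", dev_id);
}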

View File

@ -61,12 +61,6 @@ extern unsigned int __page_offset;
*/
#define PAGE_OFFSET CONFIG_KERNEL_START
/*
* MAP_NR -- given an address, calculate the index of the page struct which
* points to the address's page.
*/
#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
/*
* The basic type of a PTE - 32 bit physical addressing.
*/
@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn);
# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
# ifdef CONFIG_MMU
# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr))
# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
# else /* CONFIG_MMU */
# define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
# define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))

View File

@ -1 +1,196 @@
#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H
#define _ASM_MICROBLAZE_PCI_BRIDGE_H
#ifdef __KERNEL__
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/ioport.h>
struct device_node;
enum {
/* Force re-assigning all resources (ignore firmware
* setup completely)
*/
PCI_REASSIGN_ALL_RSRC = 0x00000001,
/* Re-assign all bus numbers */
PCI_REASSIGN_ALL_BUS = 0x00000002,
/* Do not try to assign, just use existing setup */
PCI_PROBE_ONLY = 0x00000004,
/* Don't bother with ISA alignment unless the bridge has
* ISA forwarding enabled
*/
PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
/* Enable domain numbers in /proc */
PCI_ENABLE_PROC_DOMAINS = 0x00000010,
/* ... except for domain 0 */
PCI_COMPAT_DOMAIN_0 = 0x00000020,
};
/*
* Structure of a PCI controller (host bridge)
*/
struct pci_controller {
struct pci_bus *bus;
char is_dynamic;
struct device_node *dn;
struct list_head list_node;
struct device *parent;
int first_busno;
int last_busno;
int self_busno;
void __iomem *io_base_virt;
resource_size_t io_base_phys;
resource_size_t pci_io_size;
/* Some machines (PReP) have a non 1:1 mapping of
* the PCI memory space in the CPU bus space
*/
resource_size_t pci_mem_offset;
/* Some machines have a special region to forward the ISA
* "memory" cycles such as VGA memory regions. Left to 0
* if unsupported
*/
resource_size_t isa_mem_phys;
resource_size_t isa_mem_size;
struct pci_ops *ops;
unsigned int __iomem *cfg_addr;
void __iomem *cfg_data;
/*
* Used for variants of PCI indirect handling and possible quirks:
* SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1
* EXT_REG - provides access to PCI-e extended registers
* SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS
* on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS
* to determine which bus number to match on when generating type0
* config cycles
* NO_PCIE_LINK - the Freescale PCI-e controllers have issues with
* hanging if we don't have link and try to do config cycles to
* anything but the PHB. Only allow talking to the PHB if this is
* set.
* BIG_ENDIAN - cfg_addr is a big endian register
* BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs
* on the PLB4. Effectively disable MRM commands by setting this.
*/
#define INDIRECT_TYPE_SET_CFG_TYPE 0x00000001
#define INDIRECT_TYPE_EXT_REG 0x00000002
#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004
#define INDIRECT_TYPE_NO_PCIE_LINK 0x00000008
#define INDIRECT_TYPE_BIG_ENDIAN 0x00000010
#define INDIRECT_TYPE_BROKEN_MRM 0x00000020
u32 indirect_type;
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
*/
struct resource io_resource;
struct resource mem_resources[3];
int global_number; /* PCI domain number */
};
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
return bus->sysdata;
}
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
/* No specific ISA handling on ppc32 at this stage, it
* all goes through PCI
*/
return 0;
}
/* These are used for config access before all the PCI probing
has been done. */
extern int early_read_config_byte(struct pci_controller *hose, int bus,
int dev_fn, int where, u8 *val);
extern int early_read_config_word(struct pci_controller *hose, int bus,
int dev_fn, int where, u16 *val);
extern int early_read_config_dword(struct pci_controller *hose, int bus,
int dev_fn, int where, u32 *val);
extern int early_write_config_byte(struct pci_controller *hose, int bus,
int dev_fn, int where, u8 val);
extern int early_write_config_word(struct pci_controller *hose, int bus,
int dev_fn, int where, u16 val);
extern int early_write_config_dword(struct pci_controller *hose, int bus,
int dev_fn, int where, u32 val);
extern int early_find_capability(struct pci_controller *hose, int bus,
int dev_fn, int cap);
extern void setup_indirect_pci(struct pci_controller *hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags);
/* Get the PCI host controller for an OF device */
extern struct pci_controller *pci_find_hose_for_OF_device(
struct device_node *node);
/* Fill up host controller resources from the OF node */
extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
struct device_node *dev, int primary);
/* Allocate & free a PCI host bridge structure */
extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
extern void pcibios_free_controller(struct pci_controller *phb);
extern void pcibios_setup_phb_resources(struct pci_controller *hose);
#ifdef CONFIG_PCI
extern unsigned int pci_flags;
static inline void pci_set_flags(int flags)
{
pci_flags = flags;
}
static inline void pci_add_flags(int flags)
{
pci_flags |= flags;
}
static inline int pci_has_flag(int flag)
{
return pci_flags & flag;
}
extern struct list_head hose_list;
extern unsigned long pci_address_to_pio(phys_addr_t address);
extern int pcibios_vaddr_is_ioport(void __iomem *address);
#else
static inline unsigned long pci_address_to_pio(phys_addr_t address)
{
return (unsigned long)-1;
}
static inline int pcibios_vaddr_is_ioport(void __iomem *address)
{
return 0;
}
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline int pci_has_flag(int flag)
{
return 0;
}
#endif /* CONFIG_PCI */
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_PCI_BRIDGE_H */
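A usage sketch, not part of the commit: board setup code configures probe behaviour through the flag accessors above; my_pci_setup is a hypothetical name.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/pci-bridge.h>

static void __init my_pci_setup(void)
{
	/* keep the firmware's resource assignment, but renumber the buses */
	pci_set_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_BUS);

	if (pci_has_flag(PCI_PROBE_ONLY))
		printk(KERN_INFO "PCI: probe-only mode\n");
}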

View File

@ -1 +1,177 @@
#include <asm-generic/pci.h>
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Based on powerpc version
*/
#ifndef __ASM_MICROBLAZE_PCI_H
#define __ASM_MICROBLAZE_PCI_H
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
struct pci_dev;
/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
#define IOBASE_IO 2
#define IOBASE_ISA_IO 3
#define IOBASE_ISA_MEM 4
#define pcibios_scan_all_fns(a, b) 0
/*
* Set this to 1 if you want the kernel to re-assign all PCI
* bus numbers (don't do that on ppc64 yet !)
*/
#define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
static inline void pcibios_penalize_isa_irq(int irq, int active)
{
/* We don't do dynamic PCI IRQ allocation */
}
#ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
extern struct dma_map_ops *get_pci_dma_ops(void);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
#endif
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
unsigned long *strategy_parameter)
{
*strat = PCI_DMA_BURST_INFINITY;
*strategy_parameter = ~0UL;
}
#endif
extern int pci_domain_nr(struct pci_bus *bus);
/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);
extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
size_t count);
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state);
#define HAVE_PCI_LEGACY 1
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
/* The PCI address space does equal the physical memory
* address space (no IOMMU). The IDE and SCSI device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
extern void pcibios_resource_to_bus(struct pci_dev *dev,
struct pci_bus_region *region,
struct resource *res);
extern void pcibios_bus_to_resource(struct pci_dev *dev,
struct resource *res,
struct pci_bus_region *region);
static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
struct resource *res)
{
struct resource *root = NULL;
if (res->flags & IORESOURCE_IO)
root = &ioport_resource;
if (res->flags & IORESOURCE_MEM)
root = &iomem_resource;
return root;
}
extern void pcibios_claim_one_bus(struct pci_bus *b);
extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
extern void pcibios_resource_survey(void);
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
extern void of_scan_pci_bridge(struct device_node *node,
struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
extern int pci_read_irq_line(struct pci_dev *dev);
extern int pci_bus_find_capability(struct pci_bus *bus,
unsigned int devfn, int cap);
struct file;
extern pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
unsigned long size,
pgprot_t prot);
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);
/* This part of the code was originally in xilinx-pci.h */
#ifdef CONFIG_PCI_XILINX
extern void __init xilinx_pci_init(void);
#else
static inline void __init xilinx_pci_init(void) { return; }
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_MICROBLAZE_PCI_H */
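A usage sketch, not part of the commit: pcibios_select_root() above picks the root resource a BAR belongs under; claim_bar is a hypothetical helper.

#include <linux/ioport.h>
#include <linux/pci.h>

static int claim_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];
	/* &ioport_resource for I/O BARs, &iomem_resource for memory BARs */
	struct resource *root = pcibios_select_root(pdev, res);

	if (!root)
		return -EINVAL;
	return request_resource(root, res);
}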

View File

@ -19,6 +19,7 @@
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/pgtable.h>
#define PGDIR_ORDER 0
@ -111,7 +112,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
pte_t *pte;
extern int mem_init_done;
extern void *early_get_page(void);
if (mem_init_done) {
pte = (pte_t *)__get_free_page(GFP_KERNEL |

View File

@ -16,6 +16,10 @@
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif
#ifndef CONFIG_MMU
#define pgd_present(pgd) (1) /* pages are always present on non MMU */
@ -51,6 +55,8 @@ static inline int pte_file(pte_t pte) { return 0; }
#define arch_enter_lazy_cpu_mode() do {} while (0)
#define pgprot_noncached_wc(prot) prot
#else /* CONFIG_MMU */
#include <asm-generic/4level-fixup.h>
@ -68,7 +74,6 @@ static inline int pte_file(pte_t pte) { return 0; }
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
/*
* The following only work if pte_present() is true.
@ -85,10 +90,24 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define VMALLOC_START (CONFIG_KERNEL_START + \
max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_END ioremap_bot
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#endif /* __ASSEMBLY__ */
/*
* Macro to mark a page protection value as "uncacheable".
*/
#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \
_PAGE_WRITETHRU)
#define pgprot_noncached(prot) \
(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_NO_CACHE | _PAGE_GUARDED))
#define pgprot_noncached_wc(prot) \
(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_NO_CACHE))
/*
* The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
* table containing PTEs, together with a set of 16 segment registers, to
@ -397,7 +416,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
mts rmsr, %2\n\
nop"
: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
: "cc");
return old;
@ -566,18 +585,11 @@ void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);
extern int mem_init_done;
extern unsigned long ioremap_base;
extern unsigned long ioremap_bot;
asmlinkage void __init mmu_init(void);
void __init *early_get_page(void);
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
@ -586,6 +598,14 @@ void consistent_sync_page(struct page *page, unsigned long offset,
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
extern unsigned long ioremap_bot, ioremap_base;
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
void setup_memory(void);
#endif /* __ASSEMBLY__ */

View File

@ -31,6 +31,21 @@
/* Other Prototypes */
extern int early_uartlite_console(void);
#ifdef CONFIG_PCI
/*
* PCI <-> OF matching functions
* (XXX should these be here?)
*/
struct pci_bus;
struct pci_dev;
extern int pci_device_from_OF_node(struct device_node *node,
u8 *bus, u8 *devfn);
extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
int devfn);
extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
extern void pci_create_OF_bus_map(void);
#endif
/*
* OF address retrieval & translation
*/

View File

@ -87,6 +87,9 @@ void free_initmem(void);
extern char *klimit;
extern void ret_from_fork(void);
extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
#ifdef CONFIG_DEBUG_FS
extern struct dentry *of_debugfs_root;
#endif

View File

@ -23,7 +23,7 @@
extern void _tlbie(unsigned long address);
extern void _tlbia(void);
#define __tlbia() _tlbia()
#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
static inline void local_flush_tlb_all(void)
{ __tlbia(); }

View File

@ -14,7 +14,7 @@ endif
extra-y := head.o vmlinux.lds
obj-y += exceptions.o \
obj-y += dma.o exceptions.o \
hw_exception_handler.o init_task.o intc.o irq.o of_device.o \
of_platform.o process.o prom.o prom_parse.o ptrace.o \
setup.o signal.o sys_microblaze.o timer.o traps.o reset.o

View File

@ -90,6 +90,7 @@ int main(int argc, char *argv[])
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
BLANK();
/* struct cpu_context */

View File

@ -15,25 +15,6 @@
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
static inline void __invalidate_flush_icache(unsigned int addr)
{
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (addr));
}
static inline void __flush_dcache(unsigned int addr)
{
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (addr));
}
static inline void __invalidate_dcache(unsigned int baseaddr,
unsigned int offset)
{
__asm__ __volatile__ ("wdc.clear %0, %1;" \
: : "r" (baseaddr), "r" (offset));
}
static inline void __enable_icache_msr(void)
{
__asm__ __volatile__ (" msrset r0, %0; \
@ -148,9 +129,9 @@ do { \
int step = -line_length; \
BUG_ON(step >= 0); \
\
__asm__ __volatile__ (" 1: " #op " r0, %0; \
bgtid %0, 1b; \
addk %0, %0, %1; \
__asm__ __volatile__ (" 1: " #op " r0, %0; \
bgtid %0, 1b; \
addk %0, %0, %1; \
" : : "r" (len), "r" (step) \
: "memory"); \
} while (0);
@ -162,9 +143,9 @@ do { \
int count = end - start; \
BUG_ON(count <= 0); \
\
__asm__ __volatile__ (" 1: " #op " %0, %1; \
bgtid %1, 1b; \
addk %1, %1, %2; \
__asm__ __volatile__ (" 1: " #op " %0, %1; \
bgtid %1, 1b; \
addk %1, %1, %2; \
" : : "r" (start), "r" (count), \
"r" (step) : "memory"); \
} while (0);
@ -175,7 +156,7 @@ do { \
int volatile temp; \
BUG_ON(end - start <= 0); \
\
__asm__ __volatile__ (" 1: " #op " %1, r0; \
__asm__ __volatile__ (" 1: " #op " %1, r0; \
cmpu %0, %1, %2; \
bgtid %0, 1b; \
addk %1, %1, %3; \
@ -183,10 +164,14 @@ do { \
"r" (line_length) : "memory"); \
} while (0);
#define ASM_LOOP
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
local_irq_save(flags);
__disable_icache_msr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_msr();
local_irq_restore(flags);
}
@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
local_irq_save(flags);
__disable_icache_nomsr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_nomsr();
local_irq_restore(flags);
@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start,
static void __flush_icache_range_noirq(unsigned long start,
unsigned long end)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
for (i = start; i < end; i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
}
static void __flush_icache_all_msr_irq(void)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_icache_msr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
for (i = 0; i < cpuinfo.icache_size;
i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_msr();
local_irq_restore(flags);
}
@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void)
static void __flush_icache_all_nomsr_irq(void)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_icache_nomsr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
for (i = 0; i < cpuinfo.icache_size;
i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
__enable_icache_nomsr();
local_irq_restore(flags);
}
static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
for (i = 0; i < cpuinfo.icache_size;
i += cpuinfo.icache_line_length)
__asm__ __volatile__ ("wic %0, r0;" \
: : "r" (i));
#endif
}
static void __invalidate_dcache_all_msr_irq(void)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_dcache_msr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_msr();
local_irq_restore(flags);
}
@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void)
static void __invalidate_dcache_all_nomsr_irq(void)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
local_irq_save(flags);
__disable_dcache_nomsr();
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_nomsr();
local_irq_restore(flags);
}
static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
}
/* FIXME this is weird - it should be only wdc, but that does not work:
* MS: I am getting bus errors and other weird things */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.clear %0, r0;" \
: : "r" (i));
#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
unsigned long end)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.clear %0, r0;" \
: : "r" (i));
#endif
}
static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
unsigned long end)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
}
static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
unsigned long end)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
local_irq_save(flags);
__disable_dcache_msr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_msr();
local_irq_restore(flags);
@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
unsigned long end)
{
unsigned long flags;
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
local_irq_save(flags);
__disable_dcache_nomsr();
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc %0, r0;" \
: : "r" (i));
#endif
__enable_dcache_nomsr();
local_irq_restore(flags);
@ -377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.flush);
#else
for (i = 0; i < cpuinfo.dcache_size;
i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
}
static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
int i;
#endif
pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
(unsigned int)start, (unsigned int) end);
CACHE_LOOP_LIMITS(start, end,
cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
for (i = start; i < end; i += cpuinfo.dcache_line_length)
__asm__ __volatile__ ("wdc.flush %0, r0;" \
: : "r" (i));
#endif
}
/* struct for wb caches and for wt caches */
@ -493,7 +611,7 @@ const struct scache wt_nomsr_noirq = {
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
#define INFO(s) printk(KERN_INFO "cache: " s " \n");
#define INFO(s) printk(KERN_INFO "cache: " s "\n");
void microblaze_cache_init(void)
{
@ -532,4 +650,9 @@ void microblaze_cache_init(void)
}
}
}
invalidate_dcache();
enable_dcache();
invalidate_icache();
enable_icache();
}

View File

@ -0,0 +1,156 @@
/*
* Copyright (C) 2009-2010 PetaLogix
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
* Provide default implementations of the DMA mapping callbacks for
* directly mapped busses.
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
/*
* Generic direct DMA implementation
*
* This implementation supports a per-device offset that can be applied if
* the address at which memory is visible to devices is not 0. Platform code
* can set archdata.dma_data to an unsigned long holding the offset. By
* default the offset is PCI_DRAM_OFFSET.
*/
static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
switch (direction) {
case DMA_TO_DEVICE:
flush_dcache_range(paddr + offset, paddr + offset + size);
break;
case DMA_FROM_DEVICE:
invalidate_dcache_range(paddr + offset, paddr + offset + size);
break;
default:
BUG();
}
}
static unsigned long get_dma_direct_offset(struct device *dev)
{
if (dev)
return (unsigned long)dev->archdata.dma_data;
return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
}
#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
#else
void *ret;
struct page *page;
int node = dev_to_node(dev);
/* ignore region specifiers */
flag &= ~(__GFP_HIGHMEM);
page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);
return ret;
#endif
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
consistent_free(vaddr);
#else
free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
sg->dma_length = sg->length;
__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
sg->length, direction);
}
return nents;
}
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
}
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline dma_addr_t dma_direct_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
__dma_sync_page(page_to_phys(page), offset, size, direction);
return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
static inline void dma_direct_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
/* No unmap work is necessary beyond the cache sync below.
*
* No phys_to_virt conversion is needed here because __dma_sync_page
* takes a physical address and dma_address already is one.
*/
__dma_sync_page(dma_address, 0, size, direction);
}
struct dma_map_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
static int __init dma_init(void)
{
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
fs_initcall(dma_init);
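A usage sketch, not part of the commit: platform code can give one device a non-zero bus offset exactly as the header comment of this file describes. The fixup function and the 0x80000000 offset are hypothetical.

static void __init my_dma_fixup(struct device *dev)
{
	/* get_dma_direct_offset() now returns 0x80000000 for this device
	 * instead of the PCI_DRAM_OFFSET default */
	dev->archdata.dma_data = (void *)0x80000000UL;
	set_dma_ops(dev, &dma_direct_ops);
}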

View File

@ -305,7 +305,7 @@ C_ENTRY(_user_exception):
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
@ -322,8 +322,7 @@ C_ENTRY(_user_exception):
rtid r11, 0
nop
3:
add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO /* get thread info */
lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
lwi r11, r11, TI_FLAGS /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 4f
@ -382,60 +381,50 @@ C_ENTRY(ret_from_trap):
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
swi r3, r1, PTO + PT_R3
swi r4, r1, PTO + PT_R4
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
# FIXME: Restructure all these flag checks.
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
/* FIXME: Restructure all these flag checks. */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 1f
swi r3, r1, PTO + PT_R3
swi r4, r1, PTO + PT_R4
brlid r15, do_syscall_trace_leave
addik r5, r1, PTO + PT_R0
lwi r3, r1, PTO + PT_R3
lwi r4, r1, PTO + PT_R4
1:
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
/* get thread info from current task */
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
swi r3, r1, PTO + PT_R3; /* store syscall result */
swi r4, r1, PTO + PT_R4;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
5: /* get thread info from current task */
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
swi r3, r1, PTO + PT_R3; /* store syscall result */
swi r4, r1, PTO + PT_R4;
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1:
lwi r3, r1, PTO + PT_R3; /* restore syscall result */
lwi r4, r1, PTO + PT_R4;
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
@ -565,7 +554,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper):
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
addi r11, r0, 1; \
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
/* Save away the syscall number. */ \
swi r0, r1, PTO+PT_R0; \
tovirt(r1,r1)
@ -673,9 +662,7 @@ C_ENTRY(ret_from_exc):
/* We're returning to user mode, so check for various conditions that
trigger rescheduling. */
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
@ -685,8 +672,7 @@ C_ENTRY(ret_from_exc):
nop; /* delay slot */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
@ -705,15 +691,13 @@ C_ENTRY(ret_from_exc):
* store return registers separately because this macro is used
* for other exceptions */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
@ -802,7 +786,7 @@ C_ENTRY(_interrupt):
swi r11, r0, TOPHYS(PER_CPU(KM));
2:
lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
swi r0, r1, PTO + PT_R0;
tovirt(r1,r1)
la r5, r1, PTO;
@ -817,8 +801,7 @@ ret_from_irq:
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f;
add r11, r0, CURRENT_TASK;
lwi r11, r11, TS_THREAD_INFO;
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f
@ -826,8 +809,7 @@ ret_from_irq:
nop; /* delay slot */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK;
lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqid r11, no_intr_resched
@ -842,8 +824,7 @@ no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */
add r11, r0, CURRENT_TASK;
swi r11, r0, PER_CPU(CURRENT_SAVE);
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
@ -853,7 +834,28 @@ no_intr_resched:
lwi r1, r1, PT_R1 - PT_SIZE;
bri 6f;
/* MS: Return to kernel state. */
2: VM_OFF /* MS: turn off MMU */
2:
#ifdef CONFIG_PREEMPT
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
/* MS: get preempt_count from thread info */
lwi r5, r11, TI_PREEMPT_COUNT;
bgti r5, restore;
lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
beqi r5, restore /* if zero jump over */
preempt:
/* interrupts are off, that's why I am calling preempt_schedule_irq */
bralid r15, preempt_schedule_irq
nop
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
bnei r5, preempt /* if non zero jump to resched */
restore:
#endif
VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */
lwi r4, r1, PTO + PT_R4;
@ -915,7 +917,7 @@ C_ENTRY(_debug_exception):
swi r11, r1, PTO+PT_R1; /* Store user SP. */
addi r11, r0, 1;
swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */
2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r0, r1, PTO+PT_R0;
tovirt(r1,r1)
@ -935,8 +937,7 @@ dbtrap_call: rtbd r11, 0;
bnei r11, 2f;
/* Get current task ptr into r11 */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
@ -949,8 +950,7 @@ dbtrap_call: rtbd r11, 0;
/* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
/* Maybe handle a signal */
5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
lwi r11, r11, TS_THREAD_INFO; /* get thread info */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* Signals to handle, handle them */
@ -966,16 +966,14 @@ dbtrap_call: rtbd r11, 0;
(in a possibly modified form) after do_signal returns. */
la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
nop;
add r6, r0, r0; /* Arg 2: sigset_t *oldset */
/* Finally, return to user state. */
1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */
add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */
swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
@ -1007,7 +1005,7 @@ DBTRAP_return: /* Make global symbol for debugging */
ENTRY(_switch_to)
/* prepare return value */
addk r3, r0, r31
addk r3, r0, CURRENT_TASK
/* save registers in cpu_context */
/* use r11 and r12, volatile registers, as temp register */
@ -1051,10 +1049,10 @@ ENTRY(_switch_to)
nop
swi r12, r11, CC_FSR
/* update r31, the current */
lwi r31, r6, TI_TASK/* give me pointer to task which will be next */
/* update r31 (the current task): get pointer to the task which will run next */
lwi CURRENT_TASK, r6, TI_TASK
/* stored it to current_save too */
swi r31, r0, PER_CPU(CURRENT_SAVE)
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
/* get new process' cpu context and restore */
/* give me start where start context of next task */

View File

@ -99,8 +99,8 @@ no_fdt_arg:
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
sb r7, r4, r6 /* addr[r4+r6]= r7*/
lbu r2, r5, r6 /* r2 = *(r5 + r6) - r5 contains pointer to command line */
sb r2, r4, r6 /* addr[r4 + r6] = r2 */
addik r6, r6, 1 /* increment count */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* decrement loop counter */
@ -136,6 +136,11 @@ _invalidate:
addik r3, r3, -1
/* sync */
/* Setup the kernel PID */
mts rpid,r0 /* Load the kernel PID */
nop
bri 4
/*
* We should still be executing code at physical address area
* RAM_BASEADDR at this point. However, kernel code is at
@ -146,10 +151,6 @@ _invalidate:
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
mts rpid,r0 /* Load the kernel PID */
nop
bri 4
/*
* Configure and load two entries into TLB slots 0 and 1.
* In case we are pinning TLBs, these are reserved in by the

View File

@ -93,3 +93,18 @@ skip:
}
return 0;
}
/* MS: There is no advanced mapping mechanism. We are using a simple 32-bit
intc without cascades or chained controllers; that's why the mapping is 1:1 */
unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
{
return hwirq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
unsigned int irq_create_of_mapping(struct device_node *controller,
u32 *intspec, unsigned int intsize)
{
return intspec[0];
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

View File

@ -22,7 +22,10 @@
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/param.h>
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/entry.h>
#include <asm/cpuinfo.h>
@ -54,14 +57,10 @@ void __init setup_arch(char **cmdline_p)
microblaze_cache_init();
invalidate_dcache();
enable_dcache();
invalidate_icache();
enable_icache();
setup_memory();
xilinx_pci_init();
#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
printk(KERN_NOTICE "Self-modifying code enabled\n");
#endif
@ -188,3 +187,37 @@ static int microblaze_debugfs_init(void)
}
arch_initcall(microblaze_debugfs_init);
#endif
static int dflt_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
/* We are only interested in device addition */
if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;
set_dma_ops(dev, &dma_direct_ops);
return NOTIFY_DONE;
}
static struct notifier_block dflt_plat_bus_notifier = {
.notifier_call = dflt_bus_notify,
.priority = INT_MAX,
};
static struct notifier_block dflt_of_bus_notifier = {
.notifier_call = dflt_bus_notify,
.priority = INT_MAX,
};
static int __init setup_bus_notifier(void)
{
bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier);
bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier);
return 0;
}
arch_initcall(setup_bus_notifier);

View File

@ -2,6 +2,6 @@
# Makefile
#
obj-y := init.o
obj-y := consistent.o init.o
obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o

View File

@ -0,0 +1,246 @@
/*
* Microblaze support for cache consistent memory.
* Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2010 PetaLogix
* Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
*
* Based on PowerPC version derived from arch/arm/mm/consistent.c
* Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size */
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
/*
* Consistent memory allocators. Used for DMA devices that want to
* share uncached memory with the processor core.
* My crufty no-MMU approach is simple. In the HW platform we can optionally
* mirror the DDR up above the processor cacheable region. So, memory accessed
* in this mirror region will not be cached. It's allocated from the same
* pool as normal memory, but the handle we return is shifted up into the
* uncached region. This will no doubt cause big problems if memory allocated
* here is not also freed properly. -- JW
*/
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
struct page *page, *end, *free;
unsigned long order;
void *ret, *virt;
if (in_interrupt())
BUG();
size = PAGE_ALIGN(size);
order = get_order(size);
page = alloc_pages(gfp, order);
if (!page)
goto no_page;
/* We could do with a page_to_phys and page_to_bus here. */
virt = page_address(page);
ret = ioremap(virt_to_phys(virt), size);
if (!ret)
goto no_remap;
/*
* Here's the magic! Note if the uncached shadow is not implemented,
* it's up to the calling code to also test that condition and make
* other arrangements, such as manually flushing the cache and so on.
*/
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
#endif
/* dma_handle is same as physical (shadowed) address */
*dma_handle = (dma_addr_t)ret;
/*
* free wasted pages. We skip the first page since we know
* that it will have count = 1 and won't require freeing.
* We also mark the pages in use as reserved so that
* remap_page_range works.
*/
page = virt_to_page(virt);
free = page + (size >> PAGE_SHIFT);
end = page + (1 << order);
for (; page < end; page++) {
init_page_count(page);
if (page >= free)
__free_page(page);
else
SetPageReserved(page);
}
return ret;
no_remap:
__free_pages(page, order);
no_page:
return NULL;
}
#else
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
int order, err, i;
unsigned long page, va, flags;
phys_addr_t pa;
struct vm_struct *area;
void *ret;
if (in_interrupt())
BUG();
/* Only allocate page size areas. */
size = PAGE_ALIGN(size);
order = get_order(size);
page = __get_free_pages(gfp, order);
if (!page) {
BUG();
return NULL;
}
/*
* we need to ensure that there are no cachelines in use,
* or worse dirty in this area.
*/
flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);
/* Allocate some common virtual space to map the new pages. */
area = get_vm_area(size, VM_ALLOC);
if (area == NULL) {
free_pages(page, order);
return NULL;
}
va = (unsigned long) area->addr;
ret = (void *)va;
/* This gives us the real physical address of the first page. */
*dma_handle = pa = virt_to_bus((void *)page);
/* MS: This is the whole magic - use cache inhibit pages */
flags = _PAGE_KERNEL | _PAGE_NO_CACHE;
/*
* Set refcount=1 on all pages in an order>0
* allocation so that vfree() will actually
* free all pages that were allocated.
*/
if (order > 0) {
struct page *rpage = virt_to_page(page);
for (i = 1; i < (1 << order); i++)
init_page_count(rpage+i);
}
err = 0;
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
err = map_page(va+i, pa+i, flags);
if (err) {
vfree((void *)va);
return NULL;
}
return ret;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(consistent_alloc);
/*
* free page(s) as defined by the above mapping.
*/
void consistent_free(void *vaddr)
{
if (in_interrupt())
BUG();
/* Clear SHADOW_MASK bit in address, and free as per usual */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
#endif
vfree(vaddr);
}
EXPORT_SYMBOL(consistent_free);
/*
* make an area consistent.
*/
void consistent_sync(void *vaddr, size_t size, int direction)
{
unsigned long start;
unsigned long end;
start = (unsigned long)vaddr;
/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
start &= ~UNCACHED_SHADOW_MASK;
#endif
end = start + size;
switch (direction) {
case PCI_DMA_NONE:
BUG();
case PCI_DMA_FROMDEVICE: /* invalidate only */
flush_dcache_range(start, end);
break;
case PCI_DMA_TODEVICE: /* writeback only */
flush_dcache_range(start, end);
break;
case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
flush_dcache_range(start, end);
break;
}
}
EXPORT_SYMBOL(consistent_sync);
/*
* consistent_sync_page makes memory consistent. identical
* to consistent_sync, but takes a struct page instead of a
* virtual address
*/
void consistent_sync_page(struct page *page, unsigned long offset,
size_t size, int direction)
{
unsigned long start = (unsigned long)page_address(page) + offset;
consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);
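Taken together, a driver on this non-coherent platform would use the three helpers roughly as follows (a sketch assuming process context, since the allocators BUG() in interrupt context; the buffer size is arbitrary):

#include <linux/pci.h>

static int mydrv_dma_example(void)
{
	dma_addr_t dma;
	void *vaddr = consistent_alloc(GFP_KERNEL, 4096, &dma);

	if (!vaddr)
		return -ENOMEM;
	/* CPU accesses the buffer via vaddr, the device via dma */
	consistent_sync(vaddr, 4096, PCI_DMA_BIDIRECTIONAL);
	consistent_free(vaddr);
	return 0;
}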


@@ -23,6 +23,9 @@
#include <asm/sections.h>
#include <asm/tlb.h>
/* Used for both MMU and noMMU because of the generic PCI code */
int mem_init_done;
#ifndef CONFIG_MMU
unsigned int __page_offset;
EXPORT_SYMBOL(__page_offset);
@@ -30,7 +33,6 @@ EXPORT_SYMBOL(__page_offset);
#else
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
int mem_init_done;
static int init_bootmem_done;
#endif /* CONFIG_MMU */
@@ -193,12 +195,6 @@ void free_initmem(void)
(unsigned long)(&__init_end));
}
/* FIXME from arch/powerpc/mm/mem.c*/
void show_mem(void)
{
printk(KERN_NOTICE "%s\n", __func__);
}
void __init mem_init(void)
{
high_memory = (void *)__va(memory_end);
@@ -208,9 +204,7 @@ void __init mem_init(void)
printk(KERN_INFO "Memory: %luk/%luk available\n",
nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10));
#ifdef CONFIG_MMU
mem_init_done = 1;
#endif
}
#ifndef CONFIG_MMU
@@ -222,6 +216,10 @@ int ___range_ok(unsigned long addr, unsigned long size)
}
EXPORT_SYMBOL(___range_ok);
int page_is_ram(unsigned long pfn)
{
return __range_ok(pfn, 0);
}
#else
int page_is_ram(unsigned long pfn)
{
@@ -349,4 +347,27 @@ void __init *early_get_page(void)
}
return p;
}
#endif /* CONFIG_MMU */
void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
{
if (mem_init_done)
return kmalloc(size, mask);
else
return alloc_bootmem(size);
}
void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
if (mem_init_done)
p = kzalloc(size, mask);
else {
p = alloc_bootmem(size);
if (p)
memset(p, 0, size);
}
return p;
}
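These helpers let callers allocate the same way before and after mem_init(): bootmem early in boot, the slab afterwards, selected by mem_init_done. A sketch of the intended pattern (the pci_controller use is illustrative; the suppressed pci-common.c diff presumably relies on something like it):

static struct pci_controller *alloc_hose(void)
{
	/* safe both before mem_init() (bootmem path) and after (kzalloc) */
	return zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
}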


@@ -103,7 +103,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
area = get_vm_area(size, VM_IOREMAP);
if (area == NULL)
return NULL;
v = VMALLOC_VMADDR(area->addr);
v = (unsigned long) area->addr;
} else {
v = (ioremap_bot -= size);
}


@@ -0,0 +1,6 @@
#
# Makefile
#
obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o


@@ -0,0 +1,163 @@
/*
* Support for indirect PCI bridges.
*
* Copyright (C) 1998 Gabriel Paubert.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
static int
indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
if (bus->number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
if (bus->number != hose->first_busno)
cfg_type = 1;
bus_no = (bus->number == hose->first_busno) ?
hose->self_busno : bus->number;
if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
else
reg = offset & 0xfc; /* dword-aligned register offset */
if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
else
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + (offset & 3); /* byte lane within the dword */
switch (len) {
case 1:
*val = in_8(cfg_data);
break;
case 2:
*val = in_le16(cfg_data);
break;
default:
*val = in_le32(cfg_data);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int
indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) {
if (bus->number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE)
if (bus->number != hose->first_busno)
cfg_type = 1;
bus_no = (bus->number == hose->first_busno) ?
hose->self_busno : bus->number;
if (hose->indirect_type & INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
else
reg = offset & 0xfc;
if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN)
out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
else
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
/* suppress setting of PCI_PRIMARY_BUS */
if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
if ((offset == PCI_PRIMARY_BUS) &&
(bus->number == hose->first_busno))
val &= 0xffffff00;
/* Workaround for PCI_28 Errata in 440EPx/GRx */
if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) &&
offset == PCI_CACHE_LINE_SIZE) {
val = 0;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + (offset & 3);
switch (len) {
case 1:
out_8(cfg_data, val);
break;
case 2:
out_le16(cfg_data, val);
break;
default:
out_le32(cfg_data, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops indirect_pci_ops = {
.read = indirect_read_config,
.write = indirect_write_config,
};
void __init
setup_indirect_pci(struct pci_controller *hose,
resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags)
{
resource_size_t base = cfg_addr & PAGE_MASK;
void __iomem *mbase;
mbase = ioremap(base, PAGE_SIZE);
hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
if ((cfg_data & PAGE_MASK) != base)
mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
hose->ops = &indirect_pci_ops;
hose->indirect_type = flags;
}
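Both accessors compose the same 32-bit word before writing it to hose->cfg_addr. The layout below is reconstructed from the code above, not from any bridge datasheet, and CFG_ADDR is a hypothetical macro for illustration:

/* Sketch of the config-address word:
 *   bit  31     enable bit (0x80000000)
 *   bits 23:16  bus number (hose->self_busno for the root bus)
 *   bits 15:8   devfn
 *   bits 7:2    dword-aligned register offset (offset & 0xfc)
 *   bit  0      cfg_type: 1 selects a type-1 cycle for buses
 *               behind a PCI-PCI bridge
 * With INDIRECT_TYPE_EXT_REG, offset bits 11:8 land in bits 27:24. */
#define CFG_ADDR(bus, devfn, off, type) \
	(0x80000000 | ((bus) << 16) | ((devfn) << 8) | ((off) & 0xfc) | (type))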


@@ -0,0 +1,39 @@
/*
* ppc64 "iomap" interface implementation.
*
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
if (!len)
return NULL;
if (max && len > max)
len = max;
if (flags & IORESOURCE_IO)
return ioport_map(start, len);
if (flags & IORESOURCE_MEM)
return ioremap(start, len);
/* What? */
return NULL;
}
EXPORT_SYMBOL(pci_iomap);
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
if (isa_vaddr_is_ioport(addr))
return;
if (pcibios_vaddr_is_ioport(addr))
return;
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
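A typical consumer of this pair is a PCI driver probe routine; a minimal sketch (driver name, BAR index, and register usage are hypothetical):

#include <linux/pci.h>

static int __devinit mydrv_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
{
	void __iomem *regs;

	if (pci_enable_device(pdev))
		return -ENODEV;
	regs = pci_iomap(pdev, 0, 0); /* max == 0: map all of BAR 0 */
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	/* ... ioread32()/iowrite32() against regs ... */
	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}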

File diff suppressed because it is too large.


@@ -0,0 +1,430 @@
/*
* Common pmac/prep/chrp pci routines. -- Cort
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/of.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#undef DEBUG
unsigned long isa_io_base;
unsigned long pci_dram_offset;
int pcibios_assign_bus_offset = 1;
static u8 *pci_to_OF_bus_map;
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
static int pci_assign_all_buses;
static int pci_bus_count;
/*
* Functions below are used on OpenFirmware machines.
*/
static void
make_one_node_map(struct device_node *node, u8 pci_bus)
{
const int *bus_range;
int len;
if (pci_bus >= pci_bus_count)
return;
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s, "
"assuming it starts at 0\n", node->full_name);
pci_to_OF_bus_map[pci_bus] = 0;
} else
pci_to_OF_bus_map[pci_bus] = bus_range[0];
for_each_child_of_node(node, node) {
struct pci_dev *dev;
const unsigned int *class_code, *reg;
class_code = of_get_property(node, "class-code", NULL);
if (!class_code ||
((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
reg = of_get_property(node, "reg", NULL);
if (!reg)
continue;
dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
if (!dev || !dev->subordinate) {
pci_dev_put(dev);
continue;
}
make_one_node_map(node, dev->subordinate->number);
pci_dev_put(dev);
}
}
void
pcibios_make_OF_bus_map(void)
{
int i;
struct pci_controller *hose, *tmp;
struct property *map_prop;
struct device_node *dn;
pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
if (!pci_to_OF_bus_map) {
printk(KERN_ERR "Can't allocate OF bus map !\n");
return;
}
/* We fill the bus map with invalid values; that helps
* debugging.
*/
for (i = 0; i < pci_bus_count; i++)
pci_to_OF_bus_map[i] = 0xff;
/* For each hose, we begin searching bridges */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
struct device_node *node = hose->dn;
if (!node)
continue;
make_one_node_map(node, hose->first_busno);
}
dn = of_find_node_by_path("/");
map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
if (map_prop) {
BUG_ON(pci_bus_count > map_prop->length);
memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
}
of_node_put(dn);
#ifdef DEBUG
printk(KERN_INFO "PCI->OF bus map:\n");
for (i = 0; i < pci_bus_count; i++) {
if (pci_to_OF_bus_map[i] == 0xff)
continue;
printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
}
#endif
}
typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
static struct device_node *scan_OF_pci_childs(struct device_node *parent,
pci_OF_scan_iterator filter, void *data)
{
struct device_node *node;
struct device_node *sub_node;
for_each_child_of_node(parent, node) {
const unsigned int *class_code;
if (filter(node, data)) {
of_node_put(node);
return node;
}
/* For PCI<->PCI bridges or CardBus bridges, we go down.
* Note: some OFs create a parent node "multifunc-device" as
* a fake root for all functions of a multi-function device;
* we go down them as well.
*/
class_code = of_get_property(node, "class-code", NULL);
if ((!class_code ||
((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
strcmp(node->name, "multifunc-device"))
continue;
sub_node = scan_OF_pci_childs(node, filter, data);
if (sub_node) {
of_node_put(node);
return sub_node;
}
}
return NULL;
}
static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
unsigned int devfn)
{
struct device_node *np, *cnp;
const u32 *reg;
unsigned int psize;
for_each_child_of_node(parent, np) {
reg = of_get_property(np, "reg", &psize);
if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
return np;
/* Note: some OFs create a parent node "multifunc-device" as
* a fake root for all functions of a multi-function device;
* we go down them as well. */
if (!strcmp(np->name, "multifunc-device")) {
cnp = scan_OF_for_pci_dev(np, devfn);
if (cnp)
return cnp;
}
}
return NULL;
}
static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
{
struct device_node *parent, *np;
/* Are we a root bus ? */
if (bus->self == NULL || bus->parent == NULL) {
struct pci_controller *hose = pci_bus_to_host(bus);
if (hose == NULL)
return NULL;
return of_node_get(hose->dn);
}
/* not a root bus, we need to get our parent */
parent = scan_OF_for_pci_bus(bus->parent);
if (parent == NULL)
return NULL;
/* now iterate for children for a match */
np = scan_OF_for_pci_dev(parent, bus->self->devfn);
of_node_put(parent);
return np;
}
/*
* Scans the OF tree for a device node matching a PCI device
*/
struct device_node *
pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
struct device_node *parent, *np;
pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
parent = scan_OF_for_pci_bus(bus);
if (parent == NULL)
return NULL;
pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
np = scan_OF_for_pci_dev(parent, devfn);
of_node_put(parent);
pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
/* XXX most callers don't release the returned node
* mostly because ppc64 doesn't increase the refcount,
* we need to fix that.
*/
return np;
}
EXPORT_SYMBOL(pci_busdev_to_OF_node);
struct device_node*
pci_device_to_OF_node(struct pci_dev *dev)
{
return pci_busdev_to_OF_node(dev->bus, dev->devfn);
}
EXPORT_SYMBOL(pci_device_to_OF_node);
static int
find_OF_pci_device_filter(struct device_node *node, void *data)
{
return ((void *)node == data);
}
/*
* Returns the PCI device matching a given OF node
*/
int
pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
{
const unsigned int *reg;
struct pci_controller *hose;
struct pci_dev *dev = NULL;
/* Make sure it's really a PCI device */
hose = pci_find_hose_for_OF_device(node);
if (!hose || !hose->dn)
return -ENODEV;
if (!scan_OF_pci_childs(hose->dn,
find_OF_pci_device_filter, (void *)node))
return -ENODEV;
reg = of_get_property(node, "reg", NULL);
if (!reg)
return -ENODEV;
*bus = (reg[0] >> 16) & 0xff;
*devfn = ((reg[0] >> 8) & 0xff);
/* Ok, here we need some tweak. If we have already renumbered
* all busses, we can't rely on the OF bus number any more.
* The pci_to_OF_bus_map is not enough, as several PCI busses
* may match the same OF bus number.
*/
if (!pci_to_OF_bus_map)
return 0;
for_each_pci_dev(dev)
if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
dev->devfn == *devfn) {
*bus = dev->bus->number;
pci_dev_put(dev);
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);
/* We create the "pci-OF-bus-map" property now so it appears in the
* /proc device tree
*/
void __init
pci_create_OF_bus_map(void)
{
struct property *of_prop;
struct device_node *dn;
of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
256);
if (!of_prop)
return;
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
of_prop->name = "pci-OF-bus-map";
of_prop->length = 256;
of_prop->value = &of_prop[1];
prom_add_property(dn, of_prop);
of_node_put(dn);
}
}
static void __devinit pcibios_scan_phb(struct pci_controller *hose)
{
struct pci_bus *bus;
struct device_node *node = hose->dn;
unsigned long io_offset;
struct resource *res = &hose->io_resource;
pr_debug("PCI: Scanning PHB %s\n",
node ? node->full_name : "<NO NAME>");
/* Create an empty bus for the toplevel */
bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
if (bus == NULL) {
printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
hose->global_number);
return;
}
bus->secondary = hose->first_busno;
hose->bus = bus;
/* Fixup IO space offset */
io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
res->start = (res->start + io_offset) & 0xffffffffu;
res->end = (res->end + io_offset) & 0xffffffffu;
/* Wire up PHB bus resources */
pcibios_setup_phb_resources(hose);
/* Scan children */
hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
int next_busno = 0;
printk(KERN_INFO "PCI: Probing PCI hardware\n");
if (pci_flags & PCI_REASSIGN_ALL_BUS) {
printk(KERN_INFO "setting pci_asign_all_busses\n");
pci_assign_all_buses = 1;
}
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
pcibios_scan_phb(hose);
printk(KERN_INFO "calling pci_bus_add_devices()\n");
pci_bus_add_devices(hose->bus);
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + \
pcibios_assign_bus_offset;
}
pci_bus_count = next_busno;
/* OpenFirmware based machines need a map of OF bus
* numbers vs. kernel bus numbers since we may have to
* remap them.
*/
if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
/* Call common code to handle resource allocation */
pcibios_resource_survey();
return 0;
}
subsys_initcall(pcibios_init);
static struct pci_controller*
pci_bus_to_hose(int bus)
{
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
if (bus >= hose->first_busno && bus <= hose->last_busno)
return hose;
return NULL;
}
/* Provide information on locations of various I/O regions in physical
* memory. Do this on a per-card basis so that we choose the right
* root bridge.
* Note that the returned IO or memory base is a physical address
*/
long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
struct pci_controller *hose;
long result = -EOPNOTSUPP;
hose = pci_bus_to_hose(bus);
if (!hose)
return -ENODEV;
switch (which) {
case IOBASE_BRIDGE_NUMBER:
return (long)hose->first_busno;
case IOBASE_MEMORY:
return (long)hose->pci_mem_offset;
case IOBASE_IO:
return (long)hose->io_base_phys;
case IOBASE_ISA_IO:
return (long)isa_io_base;
case IOBASE_ISA_MEM:
return (long)isa_mem_base;
}
return result;
}


@@ -0,0 +1,168 @@
/*
* PCI support for Xilinx plbv46_pci soft-core which can be used on
* Xilinx Virtex ML410 / ML510 boards.
*
* Copyright 2009 Roderick Colenbrander
* Copyright 2009 Secret Lab Technologies Ltd.
*
* The pci bridge fixup code was copied from ppc4xx_pci.c and was written
* by Benjamin Herrenschmidt.
* Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <asm/io.h>
#define XPLB_PCI_ADDR 0x10c
#define XPLB_PCI_DATA 0x110
#define XPLB_PCI_BUS 0x114
#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \
PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
static struct of_device_id xilinx_pci_match[] = {
{ .compatible = "xlnx,plbv46-pci-1.03.a", },
{}
};
/**
* xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
*/
static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
{
struct pci_controller *hose;
int i;
if (dev->devfn || dev->bus->self)
return;
hose = pci_bus_to_host(dev->bus);
if (!hose)
return;
if (!of_match_node(xilinx_pci_match, hose->dn))
return;
/* Hide the PCI host BARs from the kernel as their content doesn't
* fit well in the resource management
*/
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
dev->resource[i].flags = 0;
}
dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
#ifdef DEBUG
/**
* xilinx_pci_exclude_device - Don't do config access for non-root bus
*
* This is a hack. Config access to any bus other than bus 0 does not
* currently work on the ML510 so we prevent it here.
*/
static int
xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
{
return (bus != 0);
}
/**
* xilinx_early_pci_scan - List PCI config space for available devices
*
* List PCI devices in a very early boot phase.
*/
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
u32 bus = 0;
u32 val, dev, func, offset;
/* Currently we have only 2 devices connected - up to 32 devices */
for (dev = 0; dev < 2; dev++) {
/* List only the first function number - up to 8 functions */
for (func = 0; func < 1; func++) {
printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func);
/* read the first 64 standardized bytes */
/* Up to 192 bytes can be a list of capabilities */
for (offset = 0; offset < 64; offset += 4) {
early_read_config_dword(hose, bus,
PCI_DEVFN(dev, func), offset, &val);
if (offset == 0 && val == 0xFFFFFFFF) {
printk(KERN_CONT "\nABSENT");
break;
}
if (!(offset % 0x10))
printk(KERN_CONT "\n%04x: ", offset);
printk(KERN_CONT "%08x ", val);
}
printk(KERN_INFO "\n");
}
}
}
#else
void __init xilinx_early_pci_scan(struct pci_controller *hose)
{
}
#endif
/**
* xilinx_pci_init - Find and register a Xilinx PCI host bridge
*/
void __init xilinx_pci_init(void)
{
struct pci_controller *hose;
struct resource r;
void __iomem *pci_reg;
struct device_node *pci_node;
pci_node = of_find_matching_node(NULL, xilinx_pci_match);
if (!pci_node)
return;
if (of_address_to_resource(pci_node, 0, &r)) {
pr_err("xilinx-pci: cannot resolve base address\n");
return;
}
hose = pcibios_alloc_controller(pci_node);
if (!hose) {
pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
return;
}
/* Setup config space */
setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
r.start + XPLB_PCI_DATA,
INDIRECT_TYPE_SET_CFG_TYPE);
/* According to the xilinx plbv46_pci documentation the soft-core starts
* a self-init when the bus master enable bit is set. Without this bit
* set the pci bus can't be scanned.
*/
early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
/* Set the max latency timer to 255 */
early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
/* Set the max bus number to 255, and bus/subbus no's to 0 */
pci_reg = of_iomap(pci_node, 0);
out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff);
iounmap(pci_reg);
/* Register the host bridge with the linux kernel! */
pci_process_bridge_OF_ranges(hose, pci_node,
INDIRECT_TYPE_SET_CFG_TYPE);
pr_info("xilinx-pci: Registered PCI host bridge\n");
xilinx_early_pci_scan(hose);
}


@@ -48,6 +48,7 @@ obj-$(CONFIG_PPC) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
obj-$(CONFIG_MN10300) += setup-bus.o
obj-$(CONFIG_MICROBLAZE) += setup-bus.o
#
# ACPI Related PCI FW Functions