Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "In this set, we have:

  - Refactoring of some of the old StrongARM-1100 GPIO code to make
    things simpler, by Dmitry Eremin-Solenikov

  - Read-only and non-executable support for modules on ARM from Laura
    Abbott

  - Removal of unnecessary set_drvdata() calls in AMBA code

  - Some non-executable support for kernel lowmem mappings at the 1MB
    section granularity, and dumping of kernel page tables via debugfs

  - Some improvements for the timer/clock code on Footbridge platforms,
    and cleanup of some of the LED code there

  - Fix ffs()/fls() signatures to match x86 to prevent build warnings,
    particularly where these are used with min()/max() macros

  - Avoid using the bootmem allocator on ARM (patches from Santosh
    Shilimkar)

  - Various ASID/unaligned access updates from Will Deacon"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (51 commits)
  ARM: SMP implementations are not supposed to return from smp_ops.cpu_die()
  ARM: ignore memory below PHYS_OFFSET
  Fix select-induced Kconfig warning for ZBOOT_ROM
  ARM: fix ffs/fls implementations to match x86
  ARM: 7935/1: sa1100: collie: add gpio-keys configuration
  ARM: 7932/1: bcm: Add DEBUG_LL console support
  ARM: 7929/1: Remove duplicate SCHED_HRTICK config option
  ARM: 7928/1: kconfig: select HAVE_EFFICIENT_UNALIGNED_ACCESS for CPUv6+ && MMU
  ARM: 7927/1: dcache: select DCACHE_WORD_ACCESS for big-endian CPUs
  ARM: 7926/1: mm: flesh out and fix the comments in the ASID allocator
  ARM: 7925/1: mm: keep track of last ASID allocation to improve bitmap searching
  ARM: 7924/1: mm: don't bother with reserved ttbr0 when running with LPAE
  ARM: PCI: add legacy IDE IRQ implementation
  ARM: footbridge: cleanup LEDs code
  ARM: pgd allocation: retry on failure
  ARM: footbridge: add one-shot mode for DC21285 timer
  ARM: footbridge: add sched_clock implementation
  ARM: 7922/1: l2x0: add Marvell Tauros3 support
  ARM: 7877/1: use built-in byte swap function
  ARM: 7921/1: mcpm: remove redundant dsb instructions prior to sev
  ...
commit f341535193
@@ -7,20 +7,21 @@ The ARM L2 cache representation in the device tree should be done as follows:

Required properties:

- compatible : should be one of:
  "arm,pl310-cache"
  "arm,l220-cache"
  "arm,l210-cache"
  "marvell,aurora-system-cache": Marvell Controller designed to be
  "arm,pl310-cache"
  "arm,l220-cache"
  "arm,l210-cache"
  "bcm,bcm11351-a2-pl310-cache": DEPRECATED by "brcm,bcm11351-a2-pl310-cache"
  "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
     offset needs to be added to the address before passing down to the L2
     cache controller
  "marvell,aurora-system-cache": Marvell Controller designed to be
     compatible with the ARM one, with system cache mode (meaning
     maintenance operations on L1 are broadcasted to the L2 and L2
     performs the same operation).
  "marvell,"aurora-outer-cache: Marvell Controller designed to be
     compatible with the ARM one with outer cache mode.
  "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
     offset needs to be added to the address before passing down to the L2
     cache controller
  "bcm,bcm11351-a2-pl310-cache": DEPRECATED by
     "brcm,bcm11351-a2-pl310-cache"
  "marvell,aurora-outer-cache": Marvell Controller designed to be
     compatible with the ARM one with outer cache mode.
  "marvell,tauros3-cache": Marvell Tauros3 cache controller, compatible
     with arm,pl310-cache controller.
- cache-unified : Specifies the cache is a unified cache.
- cache-level : Should be set to 2 for a level 2 cache.
- reg : Physical base address and size of cache controller's memory mapped
@@ -6,12 +6,13 @@ config ARM
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
        select CLONE_BACKWARDS
        select CPU_PM if (SUSPEND || CPU_IDLE)
        select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU
        select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
        select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select GENERIC_IDLE_POLL_SETUP
@@ -36,6 +37,7 @@ config ARM
        select HAVE_DMA_ATTRS
        select HAVE_DMA_CONTIGUOUS if MMU
        select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
        select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
        select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
        select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
@@ -63,6 +65,7 @@ config ARM
        select IRQ_FORCED_THREADING
        select KTIME_SCALAR
        select MODULES_USE_ELF_REL
        select NO_BOOTMEM
        select OLD_SIGACTION
        select OLD_SIGSUSPEND3
        select PERF_USE_VMALLOC
@@ -1651,9 +1654,6 @@ config HZ
config SCHED_HRTICK
        def_bool HIGH_RES_TIMERS

config SCHED_HRTICK
        def_bool HIGH_RES_TIMERS

config THUMB2_KERNEL
        bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
        depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
@@ -1934,6 +1934,7 @@ config ZBOOT_ROM_BSS
config ZBOOT_ROM
        bool "Compressed boot loader in ROM/flash"
        depends on ZBOOT_ROM_TEXT != ZBOOT_ROM_BSS
        depends on !ARM_APPENDED_DTB && !XIP_KERNEL && !AUTO_ZRELADDR
        help
          Say Y here if you intend to execute your compressed kernel image
          (zImage) directly from ROM or flash. If unsure, say N.
@@ -1969,7 +1970,7 @@ endchoice

config ARM_APPENDED_DTB
        bool "Use appended device tree blob to zImage (EXPERIMENTAL)"
        depends on OF && !ZBOOT_ROM
        depends on OF
        help
          With this option, the boot code will look for a device tree binary
          (DTB) appended to zImage
@@ -2057,7 +2058,7 @@ endchoice

config XIP_KERNEL
        bool "Kernel Execute-In-Place from ROM"
        depends on !ZBOOT_ROM && !ARM_LPAE && !ARCH_MULTIPLATFORM
        depends on !ARM_LPAE && !ARCH_MULTIPLATFORM
        help
          Execute-In-Place allows the kernel to run from non-volatile storage
          directly addressable by the CPU, such as NOR flash. This saves RAM
@@ -2120,7 +2121,6 @@ config CRASH_DUMP

config AUTO_ZRELADDR
        bool "Auto calculation of the decompressed kernel image address"
        depends on !ZBOOT_ROM
        help
          ZRELADDR is the physical address where the decompressed kernel
          image will be placed. If AUTO_ZRELADDR is selected, the address
@@ -2,6 +2,18 @@ menu "Kernel hacking"

source "lib/Kconfig.debug"

config ARM_PTDUMP
        bool "Export kernel pagetable layout to userspace via debugfs"
        depends on DEBUG_KERNEL
        select DEBUG_FS
        ---help---
          Say Y here if you want to show the kernel pagetable layout in a
          debugfs file. This information is only useful for kernel developers
          who are working in architecture specific areas of the kernel.
          It is probably not a good idea to enable this feature in a production
          kernel.
          If in doubt, say "N"

config STRICT_DEVMEM
        bool "Filter access to /dev/mem"
        depends on MMU
@@ -94,6 +106,17 @@ choice
        depends on ARCH_BCM2835
        select DEBUG_UART_PL01X

config DEBUG_BCM_KONA_UART
        bool "Kernel low-level debugging messages via BCM KONA UART"
        depends on ARCH_BCM
        select DEBUG_UART_8250
        help
          Say Y here if you want kernel low-level debugging support
          on Broadcom SoC platforms.
          This low level debug works for Broadcom
          mobile SoCs in the Kona family of chips (e.g. bcm28155,
          bcm11351, etc...)

config DEBUG_CLPS711X_UART1
        bool "Kernel low-level debugging messages via UART1"
        depends on ARCH_CLPS711X
@@ -988,6 +1011,7 @@ config DEBUG_UART_PHYS
        default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
        default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0x20201000 if DEBUG_BCM2835
        default 0x3e000000 if DEBUG_BCM_KONA_UART
        default 0x4000e400 if DEBUG_LL_UART_EFM32
        default 0x40090000 if ARCH_LPC32XX
        default 0x40100000 if DEBUG_PXA_UART1
@@ -1049,6 +1073,7 @@ config DEBUG_UART_VIRT
        default 0xfe018000 if DEBUG_MMP_UART3
        default 0xfe100000 if DEBUG_IMX23_UART || DEBUG_IMX28_UART
        default 0xfe230000 if DEBUG_PICOXCELL_UART
        default 0xfe300000 if DEBUG_BCM_KONA_UART
        default 0xfe800000 if ARCH_IOP32X
        default 0xfeb00000 if DEBUG_HI3620_UART || DEBUG_HI3716_UART
        default 0xfeb24000 if DEBUG_RK3X_UART0
@@ -1091,7 +1116,8 @@ config DEBUG_UART_8250_WORD
        default y if DEBUG_PICOXCELL_UART || DEBUG_SOCFPGA_UART || \
                ARCH_KEYSTONE || \
                DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
                DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_DAVINCI_TNETV107X_UART1
                DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_DAVINCI_TNETV107X_UART1 || \
                DEBUG_BCM_KONA_UART

config DEBUG_UART_8250_FLOW_CONTROL
        bool "Enable flow control for 8250 UART"
@@ -1150,4 +1176,15 @@ config PID_IN_CONTEXTIDR
          additional instructions during context switch. Say Y here only if you
          are planning to use hardware trace tools with this kernel.

config DEBUG_SET_MODULE_RONX
        bool "Set loadable kernel module data as NX and text as RO"
        depends on MODULES
        ---help---
          This option helps catch unintended modifications to loadable
          kernel module's text and read-only data. It also prevents execution
          of module data. Such protection may interfere with run-time code
          patching and dynamic kernel tracing - and they might also protect
          against certain classes of kernel exploits.
          If in doubt, say "N".

endmenu
@@ -108,12 +108,12 @@ endif

targets       := vmlinux vmlinux.lds \
                 piggy.$(suffix_y) piggy.$(suffix_y).o \
                 lib1funcs.o lib1funcs.S ashldi3.o ashldi3.S \
                 font.o font.c head.o misc.o $(OBJS)
                 lib1funcs.o lib1funcs.S ashldi3.o ashldi3.S bswapsdi2.o \
                 bswapsdi2.S font.o font.c head.o misc.o $(OBJS)

# Make sure files are removed during clean
extra-y       += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \
                 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
                 lib1funcs.S ashldi3.S bswapsdi2.S $(libfdt) $(libfdt_hdrs) \
                 hyp-stub.S

ifeq ($(CONFIG_FUNCTION_TRACER),y)
@@ -156,6 +156,12 @@ ashldi3 = $(obj)/ashldi3.o
$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S
        $(call cmd,shipped)

# For __bswapsi2, __bswapdi2
bswapsdi2 = $(obj)/bswapsdi2.o

$(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S
        $(call cmd,shipped)

# We need to prevent any GOTOFF relocs being used with references
# to symbols in the .bss section since we cannot relocate them
# independently from the rest at run time. This can be achieved by
@@ -177,7 +183,8 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
fi

$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
                $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE
                $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
                $(bswapsdi2) FORCE
        @$(check_for_multiple_zreladdr)
        $(call if_changed,ld)
        @$(check_for_bad_syms)
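The bswapsdi2 objects wired into the build above provide the libgcc-style __bswapsi2()/__bswapdi2() helpers that the compiler may fall back to for __builtin_bswap32()/__builtin_bswap64() (enabled by ARCH_USE_BUILTIN_BSWAP in the Kconfig hunk earlier). A minimal C sketch of the values those helpers compute - illustrative names only, the real helpers are hand-written ARM assembly added later in this series:

#include <stdint.h>

/* Reference 32-bit byte swap: b3 b2 b1 b0 -> b0 b1 b2 b3. */
static uint32_t bswap32_ref(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}

/* Reference 64-bit byte swap: swap the halves and byte-swap each. */
static uint64_t bswap64_ref(uint64_t x)
{
        return ((uint64_t)bswap32_ref((uint32_t)x) << 32) |
               bswap32_ref((uint32_t)(x >> 32));
}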
@@ -35,8 +35,7 @@ void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
        unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
        poke[0] = poke_phys_addr;
        poke[1] = poke_val;
        __cpuc_flush_dcache_area((void *)poke, 8);
        outer_clean_range(__pa(poke), __pa(poke + 2));
        __sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;
@@ -167,7 +166,7 @@ void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
        dmb();
        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
        dsb_sev();
        sev();
}

/*
@@ -183,7 +182,7 @@ void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
        dmb();
        mcpm_sync.clusters[cluster].cluster = state;
        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
        dsb_sev();
        sev();
}

/*
@@ -254,25 +254,59 @@ static inline int constant_fls(int x)
}

/*
 * On ARMv5 and above those functions can be implemented around
 * the clz instruction for much better code efficiency.
 * On ARMv5 and above those functions can be implemented around the
 * clz instruction for much better code efficiency.  __clz returns
 * the number of leading zeros, zero input will return 32, and
 * 0x80000000 will return 0.
 */

static inline int fls(int x)
static inline unsigned int __clz(unsigned int x)
{
        int ret;

        if (__builtin_constant_p(x))
                return constant_fls(x);
        unsigned int ret;

        asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
        ret = 32 - ret;

        return ret;
}

#define __fls(x) (fls(x) - 1)
#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
#define __ffs(x) (ffs(x) - 1)
/*
 * fls() returns zero if the input is zero, otherwise returns the bit
 * position of the last set bit, where the LSB is 1 and MSB is 32.
 */
static inline int fls(int x)
{
        if (__builtin_constant_p(x))
                return constant_fls(x);

        return 32 - __clz(x);
}

/*
 * __fls() returns the bit position of the last bit set, where the
 * LSB is 0 and MSB is 31.  Zero input is undefined.
 */
static inline unsigned long __fls(unsigned long x)
{
        return fls(x) - 1;
}

/*
 * ffs() returns zero if the input was zero, otherwise returns the bit
 * position of the first set bit, where the LSB is 1 and MSB is 32.
 */
static inline int ffs(int x)
{
        return fls(x & -x);
}

/*
 * __ffs() returns the bit position of the first bit set, where the
 * LSB is 0 and MSB is 31.  Zero input is undefined.
 */
static inline unsigned long __ffs(unsigned long x)
{
        return ffs(x) - 1;
}

#define ffz(x) __ffs( ~(x) )

#endif
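As the new comments above spell out, fls()/ffs() are 1-based and return 0 for a zero argument, while __fls()/__ffs() are 0-based with zero input undefined. A small usage sketch of the semantics (illustrative values only, not part of the patch):

#include <assert.h>

static void bitops_examples(void)
{
        assert(fls(0) == 0 && ffs(0) == 0);          /* zero input -> 0 */
        assert(fls(0x80000000) == 32);               /* MSB set -> 32 (1-based) */
        assert(ffs(0x00000006) == 2);                /* lowest set bit is bit 1 */
        assert(__fls(0x00000006) == 2);              /* 0-based highest set bit */
        assert(__ffs(0x00000006) == 1);              /* 0-based lowest set bit */
}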
@@ -481,4 +481,9 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
                : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
                      "r9","r10","lr","memory" )

int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);

#endif
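These set_memory_*() declarations are what DEBUG_SET_MODULE_RONX (see the Kconfig.debug hunk earlier) relies on to flip module mappings between RO/RW and X/NX. A hedged usage sketch, assuming a page-aligned region; the helper name is illustrative and not part of the patch:

/* Illustrative only: mark a page-aligned region read-only and
 * non-executable, or restore it.  addr is a kernel virtual address,
 * numpages a page count, per the declarations above. */
static void frob_region(unsigned long addr, int numpages, bool protect)
{
        if (protect) {
                set_memory_ro(addr, numpages);
                set_memory_nx(addr, numpages);
        } else {
                set_memory_rw(addr, numpages);
                set_memory_x(addr, numpages);
        }
}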
@@ -87,19 +87,33 @@ static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
                   unsigned short proto, __wsum sum)
{
        __asm__(
        "adds   %0, %1, %2      @ csum_tcpudp_nofold    \n\
        adcs    %0, %0, %3                              \n"
        u32 lenprot = len | proto << 16;
        if (__builtin_constant_p(sum) && sum == 0) {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold0   \n\t"
#ifdef __ARMEB__
        "adcs   %0, %0, %4                              \n"
                "adcs   %0, %0, %3                              \n\t"
#else
        "adcs   %0, %0, %4, lsl #8                      \n"
                "adcs   %0, %0, %3, ror #8                      \n\t"
#endif
        "adcs   %0, %0, %5                              \n\
        adc     %0, %0, #0"
        : "=&r"(sum)
        : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
        : "cc");
                "adc    %0, %0, #0"
                : "=&r" (sum)
                : "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        } else {
                __asm__(
                "adds   %0, %1, %2      @ csum_tcpudp_nofold    \n\t"
                "adcs   %0, %0, %3                              \n\t"
#ifdef __ARMEB__
                "adcs   %0, %0, %4                              \n\t"
#else
                "adcs   %0, %0, %4, ror #8                      \n\t"
#endif
                "adc    %0, %0, #0"
                : "=&r"(sum)
                : "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
                : "cc");
        }
        return sum;
}
/*
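Both branches of the rewritten csum_tcpudp_nofold() accumulate the pseudo-header fields with end-around carry; the constant-zero-sum case merely drops one addition. A rough portable sketch of that arithmetic, assuming the big-endian word layout only (the little-endian path uses the rotate shown in the assembly) and with illustrative names:

#include <stdint.h>

/* 32-bit add with the carry folded back in (end-around carry). */
static uint32_t ones_add32(uint32_t a, uint32_t b)
{
        uint64_t s = (uint64_t)a + b;
        return (uint32_t)(s + (s >> 32));
}

/* Unfolded pseudo-header sum, as csum_tcpudp_nofold() returns it.
 * The intermediate 32-bit value may differ from the assembly's, but
 * it folds to the same 16-bit checksum. */
static uint32_t pseudo_hdr_sum_ref(uint32_t saddr, uint32_t daddr,
                                   uint16_t len, uint16_t proto, uint32_t sum)
{
        sum = ones_add32(sum, saddr);
        sum = ones_add32(sum, daddr);
        return ones_add32(sum, ((uint32_t)proto << 16) | len);
}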
@@ -131,6 +131,7 @@ struct l2x0_regs {
        unsigned long prefetch_ctrl;
        unsigned long pwr_ctrl;
        unsigned long ctrl;
        unsigned long aux2_ctrl;
};

extern struct l2x0_regs l2x0_saved_regs;
@ -22,18 +22,21 @@ struct map_desc {
|
||||
};
|
||||
|
||||
/* types 0-3 are defined in asm/io.h */
|
||||
#define MT_UNCACHED 4
|
||||
#define MT_CACHECLEAN 5
|
||||
#define MT_MINICLEAN 6
|
||||
#define MT_LOW_VECTORS 7
|
||||
#define MT_HIGH_VECTORS 8
|
||||
#define MT_MEMORY 9
|
||||
#define MT_ROM 10
|
||||
#define MT_MEMORY_NONCACHED 11
|
||||
#define MT_MEMORY_DTCM 12
|
||||
#define MT_MEMORY_ITCM 13
|
||||
#define MT_MEMORY_SO 14
|
||||
#define MT_MEMORY_DMA_READY 15
|
||||
enum {
|
||||
MT_UNCACHED = 4,
|
||||
MT_CACHECLEAN,
|
||||
MT_MINICLEAN,
|
||||
MT_LOW_VECTORS,
|
||||
MT_HIGH_VECTORS,
|
||||
MT_MEMORY_RWX,
|
||||
MT_MEMORY_RW,
|
||||
MT_ROM,
|
||||
MT_MEMORY_RWX_NONCACHED,
|
||||
MT_MEMORY_RW_DTCM,
|
||||
MT_MEMORY_RWX_ITCM,
|
||||
MT_MEMORY_RW_SO,
|
||||
MT_MEMORY_DMA_READY,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern void iotable_init(struct map_desc *, int);
|
||||
|
@ -57,12 +57,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
|
||||
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
||||
enum pci_mmap_state mmap_state, int write_combine);
|
||||
|
||||
/*
|
||||
* Dummy implementation; always return 0.
|
||||
*/
|
||||
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
|
||||
{
|
||||
return 0;
|
||||
return channel ? 15 : 14;
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
@ -160,6 +160,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
|
||||
return (pmd_t *)pud;
|
||||
}
|
||||
|
||||
#define pmd_large(pmd) (pmd_val(pmd) & 2)
|
||||
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
|
||||
|
||||
#define copy_pmd(pmdpd,pmdps) \
|
||||
|
@ -142,6 +142,7 @@
|
||||
PMD_TYPE_TABLE)
|
||||
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
|
||||
PMD_TYPE_SECT)
|
||||
#define pmd_large(pmd) pmd_sect(pmd)
|
||||
|
||||
#define pud_clear(pudp) \
|
||||
do { \
|
||||
|
@ -254,6 +254,8 @@ PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
|
||||
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
|
||||
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
|
||||
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
|
||||
PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN);
|
||||
PTE_BIT_FUNC(mknexec, |= L_PTE_XN);
|
||||
|
||||
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
|
||||
|
||||
|
@ -48,10 +48,14 @@ static inline unsigned long find_zero(unsigned long mask)
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DCACHE_WORD_ACCESS
|
||||
|
||||
#define zero_bytemask(mask) (mask)
|
||||
|
||||
#else /* __ARMEB__ */
|
||||
#include <asm-generic/word-at-a-time.h>
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DCACHE_WORD_ACCESS
|
||||
|
||||
/*
|
||||
* Load an unaligned word from kernel space.
|
||||
*
|
||||
@ -73,7 +77,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
|
||||
" bic %2, %2, #0x3\n"
|
||||
" ldr %0, [%2]\n"
|
||||
" lsl %1, %1, #0x3\n"
|
||||
#ifndef __ARMEB__
|
||||
" lsr %0, %0, %1\n"
|
||||
#else
|
||||
" lsl %0, %0, %1\n"
|
||||
#endif
|
||||
" b 2b\n"
|
||||
" .popsection\n"
|
||||
" .pushsection __ex_table,\"a\"\n"
|
||||
@ -86,11 +94,5 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
#endif /* DCACHE_WORD_ACCESS */
|
||||
|
||||
#else /* __ARMEB__ */
|
||||
#include <asm-generic/word-at-a-time.h>
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
|
||||
|
@ -35,6 +35,8 @@ extern void __ucmpdi2(void);
|
||||
extern void __udivsi3(void);
|
||||
extern void __umodsi3(void);
|
||||
extern void __do_div64(void);
|
||||
extern void __bswapsi2(void);
|
||||
extern void __bswapdi2(void);
|
||||
|
||||
extern void __aeabi_idiv(void);
|
||||
extern void __aeabi_idivmod(void);
|
||||
@ -114,6 +116,8 @@ EXPORT_SYMBOL(__ucmpdi2);
|
||||
EXPORT_SYMBOL(__udivsi3);
|
||||
EXPORT_SYMBOL(__umodsi3);
|
||||
EXPORT_SYMBOL(__do_div64);
|
||||
EXPORT_SYMBOL(__bswapsi2);
|
||||
EXPORT_SYMBOL(__bswapdi2);
|
||||
|
||||
#ifdef CONFIG_AEABI
|
||||
EXPORT_SYMBOL(__aeabi_idiv);
|
||||
|
@ -14,8 +14,6 @@
|
||||
#include <asm/thread_notify.h>
|
||||
#include <asm/v7m.h>
|
||||
|
||||
#include <mach/entry-macro.S>
|
||||
|
||||
#include "entry-header.S"
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
|
@ -385,7 +385,6 @@ out:
|
||||
return ret;
|
||||
|
||||
out_unmap:
|
||||
amba_set_drvdata(dev, NULL);
|
||||
iounmap(t->etb_regs);
|
||||
|
||||
out_release:
|
||||
@ -398,8 +397,6 @@ static int etb_remove(struct amba_device *dev)
|
||||
{
|
||||
struct tracectx *t = amba_get_drvdata(dev);
|
||||
|
||||
amba_set_drvdata(dev, NULL);
|
||||
|
||||
iounmap(t->etb_regs);
|
||||
t->etb_regs = NULL;
|
||||
|
||||
@ -588,7 +585,6 @@ out:
|
||||
return ret;
|
||||
|
||||
out_unmap:
|
||||
amba_set_drvdata(dev, NULL);
|
||||
iounmap(t->etm_regs);
|
||||
|
||||
out_release:
|
||||
@ -601,8 +597,6 @@ static int etm_remove(struct amba_device *dev)
|
||||
{
|
||||
struct tracectx *t = amba_get_drvdata(dev);
|
||||
|
||||
amba_set_drvdata(dev, NULL);
|
||||
|
||||
iounmap(t->etm_regs);
|
||||
t->etm_regs = NULL;
|
||||
|
||||
|
@ -334,7 +334,7 @@ static void __init cacheid_init(void)
|
||||
cacheid = CACHEID_VIVT;
|
||||
}
|
||||
|
||||
printk("CPU: %s data cache, %s instruction cache\n",
|
||||
pr_info("CPU: %s data cache, %s instruction cache\n",
|
||||
cache_is_vivt() ? "VIVT" :
|
||||
cache_is_vipt_aliasing() ? "VIPT aliasing" :
|
||||
cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
|
||||
@ -416,7 +416,7 @@ void notrace cpu_init(void)
|
||||
struct stack *stk = &stacks[cpu];
|
||||
|
||||
if (cpu >= NR_CPUS) {
|
||||
printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
|
||||
pr_crit("CPU%u: bad primary CPU number\n", cpu);
|
||||
BUG();
|
||||
}
|
||||
|
||||
@ -484,7 +484,7 @@ void __init smp_setup_processor_id(void)
|
||||
*/
|
||||
set_my_cpu_offset(0);
|
||||
|
||||
printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
|
||||
pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
|
||||
}
|
||||
|
||||
struct mpidr_hash mpidr_hash;
|
||||
@ -564,8 +564,8 @@ static void __init setup_processor(void)
|
||||
*/
|
||||
list = lookup_processor_type(read_cpuid_id());
|
||||
if (!list) {
|
||||
printk("CPU configuration botched (ID %08x), unable "
|
||||
"to continue.\n", read_cpuid_id());
|
||||
pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
|
||||
read_cpuid_id());
|
||||
while (1);
|
||||
}
|
||||
|
||||
@ -585,9 +585,9 @@ static void __init setup_processor(void)
|
||||
cpu_cache = *list->cache;
|
||||
#endif
|
||||
|
||||
printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
|
||||
cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
|
||||
proc_arch[cpu_architecture()], cr_alignment);
|
||||
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
|
||||
cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
|
||||
proc_arch[cpu_architecture()], cr_alignment);
|
||||
|
||||
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
|
||||
list->arch_name, ENDIANNESS);
|
||||
@ -629,8 +629,8 @@ int __init arm_add_memory(u64 start, u64 size)
|
||||
u64 aligned_start;
|
||||
|
||||
if (meminfo.nr_banks >= NR_BANKS) {
|
||||
printk(KERN_CRIT "NR_BANKS too low, "
|
||||
"ignoring memory at 0x%08llx\n", (long long)start);
|
||||
pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
|
||||
(long long)start);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -643,14 +643,14 @@ int __init arm_add_memory(u64 start, u64 size)
|
||||
|
||||
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
|
||||
if (aligned_start > ULONG_MAX) {
|
||||
printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
|
||||
"32-bit physical address space\n", (long long)start);
|
||||
pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
|
||||
(long long)start);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (aligned_start + size > ULONG_MAX) {
|
||||
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
|
||||
"32-bit physical address space\n", (long long)start);
|
||||
pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
|
||||
(long long)start);
|
||||
/*
|
||||
* To ensure bank->start + bank->size is representable in
|
||||
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
|
||||
@ -660,6 +660,20 @@ int __init arm_add_memory(u64 start, u64 size)
|
||||
}
|
||||
#endif
|
||||
|
||||
if (aligned_start < PHYS_OFFSET) {
|
||||
if (aligned_start + size <= PHYS_OFFSET) {
|
||||
pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
|
||||
aligned_start, aligned_start + size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
|
||||
aligned_start, (u64)PHYS_OFFSET);
|
||||
|
||||
size -= PHYS_OFFSET - aligned_start;
|
||||
aligned_start = PHYS_OFFSET;
|
||||
}
|
||||
|
||||
bank->start = aligned_start;
|
||||
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
|
||||
|
||||
@ -817,18 +831,17 @@ static void __init reserve_crashkernel(void)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
|
||||
ret = memblock_reserve(crash_base, crash_size);
|
||||
if (ret < 0) {
|
||||
printk(KERN_WARNING "crashkernel reservation failed - "
|
||||
"memory is in use (0x%lx)\n", (unsigned long)crash_base);
|
||||
pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
|
||||
(unsigned long)crash_base);
|
||||
return;
|
||||
}
|
||||
|
||||
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
|
||||
"for crashkernel (System RAM: %ldMB)\n",
|
||||
(unsigned long)(crash_size >> 20),
|
||||
(unsigned long)(crash_base >> 20),
|
||||
(unsigned long)(total_mem >> 20));
|
||||
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
|
||||
(unsigned long)(crash_size >> 20),
|
||||
(unsigned long)(crash_base >> 20),
|
||||
(unsigned long)(total_mem >> 20));
|
||||
|
||||
crashk_res.start = crash_base;
|
||||
crashk_res.end = crash_base + crash_size - 1;
|
||||
|
@ -105,8 +105,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
||||
secondary_data.pgdir = get_arch_pgd(idmap_pgd);
|
||||
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
|
||||
#endif
|
||||
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
|
||||
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
|
||||
sync_cache_w(&secondary_data);
|
||||
|
||||
/*
|
||||
* Now bring the CPU into our world.
|
||||
@ -294,6 +293,9 @@ void __ref cpu_die(void)
|
||||
if (smp_ops.cpu_die)
|
||||
smp_ops.cpu_die(cpu);
|
||||
|
||||
pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
|
||||
cpu);
|
||||
|
||||
/*
|
||||
* Do not return to the idle loop - jump back to the secondary
|
||||
* cpu initialisation. There's some initialisation which needs
|
||||
|
@ -52,7 +52,7 @@ static struct map_desc dtcm_iomap[] __initdata = {
|
||||
.virtual = DTCM_OFFSET,
|
||||
.pfn = __phys_to_pfn(DTCM_OFFSET),
|
||||
.length = 0,
|
||||
.type = MT_MEMORY_DTCM
|
||||
.type = MT_MEMORY_RW_DTCM
|
||||
}
|
||||
};
|
||||
|
||||
@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
|
||||
.virtual = ITCM_OFFSET,
|
||||
.pfn = __phys_to_pfn(ITCM_OFFSET),
|
||||
.length = 0,
|
||||
.type = MT_MEMORY_ITCM
|
||||
.type = MT_MEMORY_RWX_ITCM,
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -68,16 +68,16 @@ struct cpu_efficiency {
|
||||
* Processors that are not defined in the table,
|
||||
* use the default SCHED_POWER_SCALE value for cpu_scale.
|
||||
*/
|
||||
struct cpu_efficiency table_efficiency[] = {
|
||||
static const struct cpu_efficiency table_efficiency[] = {
|
||||
{"arm,cortex-a15", 3891},
|
||||
{"arm,cortex-a7", 2048},
|
||||
{NULL, },
|
||||
};
|
||||
|
||||
unsigned long *__cpu_capacity;
|
||||
static unsigned long *__cpu_capacity;
|
||||
#define cpu_capacity(cpu) __cpu_capacity[cpu]
|
||||
|
||||
unsigned long middle_capacity = 1;
|
||||
static unsigned long middle_capacity = 1;
|
||||
|
||||
/*
|
||||
* Iterate all CPUs' descriptor in DT and compute the efficiency
|
||||
@ -89,7 +89,7 @@ unsigned long middle_capacity = 1;
|
||||
*/
|
||||
static void __init parse_dt_topology(void)
|
||||
{
|
||||
struct cpu_efficiency *cpu_eff;
|
||||
const struct cpu_efficiency *cpu_eff;
|
||||
struct device_node *cn = NULL;
|
||||
unsigned long min_capacity = (unsigned long)(-1);
|
||||
unsigned long max_capacity = 0;
|
||||
@ -158,7 +158,7 @@ static void __init parse_dt_topology(void)
|
||||
* boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
|
||||
* function returns directly for SMP system.
|
||||
*/
|
||||
void update_cpu_power(unsigned int cpu)
|
||||
static void update_cpu_power(unsigned int cpu)
|
||||
{
|
||||
if (!cpu_capacity(cpu))
|
||||
return;
|
||||
@ -185,7 +185,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
|
||||
return &cpu_topology[cpu].core_sibling;
|
||||
}
|
||||
|
||||
void update_siblings_masks(unsigned int cpuid)
|
||||
static void update_siblings_masks(unsigned int cpuid)
|
||||
{
|
||||
struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
|
||||
int cpu;
|
||||
|
@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
|
||||
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
|
||||
{
|
||||
#ifdef CONFIG_KALLSYMS
|
||||
printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
|
||||
printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
|
||||
#else
|
||||
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
|
||||
#endif
|
||||
|
@ -13,7 +13,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
|
||||
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
|
||||
ucmpdi2.o lib1funcs.o div64.o \
|
||||
io-readsb.o io-writesb.o io-readsl.o io-writesl.o \
|
||||
call_with_stack.o
|
||||
call_with_stack.o bswapsdi2.o
|
||||
|
||||
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
|
||||
|
||||
|
@ -80,14 +80,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions
|
||||
|
||||
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
|
||||
ldr r3, .Ldsi+4
|
||||
teq r3, r1, lsr #10
|
||||
teq r3, r1, lsr #11
|
||||
ldreq r0, [frame, #-8] @ get sp
|
||||
subeq r0, r0, #4 @ point at the last arg
|
||||
bleq .Ldumpstm @ dump saved registers
|
||||
|
||||
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
|
||||
ldr r3, .Ldsi @ instruction exists,
|
||||
teq r3, r1, lsr #10
|
||||
teq r3, r1, lsr #11
|
||||
subeq r0, frame, #16
|
||||
bleq .Ldumpstm @ dump saved registers
|
||||
|
||||
@ -128,11 +128,11 @@ ENDPROC(c_backtrace)
|
||||
beq 2f
|
||||
add r7, r7, #1
|
||||
teq r7, #6
|
||||
moveq r7, #1
|
||||
moveq r1, #'\n'
|
||||
movne r1, #' '
|
||||
ldr r3, [stack], #-4
|
||||
mov r2, reg
|
||||
moveq r7, #0
|
||||
adr r3, .Lcr
|
||||
addne r3, r3, #1 @ skip newline
|
||||
ldr r2, [stack], #-4
|
||||
mov r1, reg
|
||||
adr r0, .Lfp
|
||||
bl printk
|
||||
2: subs reg, reg, #1
|
||||
@ -142,11 +142,11 @@ ENDPROC(c_backtrace)
|
||||
blne printk
|
||||
ldmfd sp!, {instr, reg, stack, r7, pc}
|
||||
|
||||
.Lfp: .asciz "%cr%d:%08x"
|
||||
.Lfp: .asciz " r%d:%08x%s"
|
||||
.Lcr: .asciz "\n"
|
||||
.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
|
||||
.align
|
||||
.Ldsi: .word 0xe92dd800 >> 10 @ stmfd sp!, {... fp, ip, lr, pc}
|
||||
.word 0xe92d0000 >> 10 @ stmfd sp!, {}
|
||||
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
|
||||
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
|
||||
|
||||
#endif
|
||||
|
arch/arm/lib/bswapsdi2.S (new file, 36 lines)
@ -0,0 +1,36 @@
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#if __LINUX_ARM_ARCH__ >= 6
|
||||
ENTRY(__bswapsi2)
|
||||
rev r0, r0
|
||||
bx lr
|
||||
ENDPROC(__bswapsi2)
|
||||
|
||||
ENTRY(__bswapdi2)
|
||||
rev r3, r0
|
||||
rev r0, r1
|
||||
mov r1, r3
|
||||
bx lr
|
||||
ENDPROC(__bswapdi2)
|
||||
#else
|
||||
ENTRY(__bswapsi2)
|
||||
eor r3, r0, r0, ror #16
|
||||
mov r3, r3, lsr #8
|
||||
bic r3, r3, #0xff00
|
||||
eor r0, r3, r0, ror #8
|
||||
mov pc, lr
|
||||
ENDPROC(__bswapsi2)
|
||||
|
||||
ENTRY(__bswapdi2)
|
||||
mov ip, r1
|
||||
eor r3, ip, ip, ror #16
|
||||
eor r1, r0, r0, ror #16
|
||||
mov r1, r1, lsr #8
|
||||
mov r3, r3, lsr #8
|
||||
bic r3, r3, #0xff00
|
||||
bic r1, r1, #0xff00
|
||||
eor r1, r1, r0, ror #8
|
||||
eor r0, r3, ip, ror #8
|
||||
mov pc, lr
|
||||
ENDPROC(__bswapdi2)
|
||||
#endif
|
@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
|
||||
|
||||
desc->pfn = __phys_to_pfn(base);
|
||||
desc->length = length;
|
||||
desc->type = MT_MEMORY_NONCACHED;
|
||||
desc->type = MT_MEMORY_RWX_NONCACHED;
|
||||
|
||||
pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
|
||||
base, length, desc->virtual);
|
||||
|
@ -143,11 +143,6 @@ static struct map_desc fb_common_io_desc[] __initdata = {
|
||||
.pfn = __phys_to_pfn(DC21285_ARMCSR_BASE),
|
||||
.length = ARMCSR_SIZE,
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = XBUS_BASE,
|
||||
.pfn = __phys_to_pfn(0x40000000),
|
||||
.length = XBUS_SIZE,
|
||||
.type = MT_DEVICE,
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -10,3 +10,5 @@ extern void footbridge_init_irq(void);
|
||||
|
||||
extern void isa_init_irq(unsigned int irq);
|
||||
extern void footbridge_restart(enum reboot_mode, const char *);
|
||||
|
||||
extern void footbridge_sched_clock(void);
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/sched_clock.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
|
||||
@ -46,6 +47,16 @@ static struct clocksource cksrc_dc21285 = {
|
||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
static int ckevt_dc21285_set_next_event(unsigned long delta,
|
||||
struct clock_event_device *c)
|
||||
{
|
||||
*CSR_TIMER1_CLR = 0;
|
||||
*CSR_TIMER1_LOAD = delta;
|
||||
*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
|
||||
struct clock_event_device *c)
|
||||
{
|
||||
@ -58,7 +69,9 @@ static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
|
||||
TIMER_CNTL_DIV16;
|
||||
break;
|
||||
|
||||
default:
|
||||
case CLOCK_EVT_MODE_ONESHOT:
|
||||
case CLOCK_EVT_MODE_UNUSED:
|
||||
case CLOCK_EVT_MODE_SHUTDOWN:
|
||||
*CSR_TIMER1_CNTL = 0;
|
||||
break;
|
||||
}
|
||||
@ -66,9 +79,11 @@ static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
|
||||
|
||||
static struct clock_event_device ckevt_dc21285 = {
|
||||
.name = "dc21285_timer1",
|
||||
.features = CLOCK_EVT_FEAT_PERIODIC,
|
||||
.features = CLOCK_EVT_FEAT_PERIODIC |
|
||||
CLOCK_EVT_FEAT_ONESHOT,
|
||||
.rating = 200,
|
||||
.irq = IRQ_TIMER1,
|
||||
.set_next_event = ckevt_dc21285_set_next_event,
|
||||
.set_mode = ckevt_dc21285_set_mode,
|
||||
};
|
||||
|
||||
@ -78,6 +93,10 @@ static irqreturn_t timer1_interrupt(int irq, void *dev_id)
|
||||
|
||||
*CSR_TIMER1_CLR = 0;
|
||||
|
||||
/* Stop the timer if in one-shot mode */
|
||||
if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
|
||||
*CSR_TIMER1_CNTL = 0;
|
||||
|
||||
ce->event_handler(ce);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -105,3 +124,19 @@ void __init footbridge_timer_init(void)
|
||||
ce->cpumask = cpumask_of(smp_processor_id());
|
||||
clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
|
||||
}
|
||||
|
||||
static u32 notrace footbridge_read_sched_clock(void)
|
||||
{
|
||||
return ~*CSR_TIMER3_VALUE;
|
||||
}
|
||||
|
||||
void __init footbridge_sched_clock(void)
|
||||
{
|
||||
unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
|
||||
|
||||
*CSR_TIMER3_LOAD = 0;
|
||||
*CSR_TIMER3_CLR = 0;
|
||||
*CSR_TIMER3_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
|
||||
|
||||
setup_sched_clock(footbridge_read_sched_clock, 24, rate);
|
||||
}
|
||||
|
@ -4,6 +4,7 @@
|
||||
* EBSA285 machine fixup
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/leds.h>
|
||||
@ -17,6 +18,11 @@
|
||||
|
||||
/* LEDs */
|
||||
#if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS)
|
||||
#define XBUS_AMBER_L BIT(0)
|
||||
#define XBUS_GREEN_L BIT(1)
|
||||
#define XBUS_RED_L BIT(2)
|
||||
#define XBUS_TOGGLE BIT(7)
|
||||
|
||||
struct ebsa285_led {
|
||||
struct led_classdev cdev;
|
||||
u8 mask;
|
||||
@ -36,6 +42,7 @@ static const struct {
|
||||
};
|
||||
|
||||
static unsigned char hw_led_state;
|
||||
static void __iomem *xbus;
|
||||
|
||||
static void ebsa285_led_set(struct led_classdev *cdev,
|
||||
enum led_brightness b)
|
||||
@ -47,7 +54,7 @@ static void ebsa285_led_set(struct led_classdev *cdev,
|
||||
hw_led_state |= led->mask;
|
||||
else
|
||||
hw_led_state &= ~led->mask;
|
||||
*XBUS_LEDS = hw_led_state;
|
||||
writeb(hw_led_state, xbus);
|
||||
}
|
||||
|
||||
static enum led_brightness ebsa285_led_get(struct led_classdev *cdev)
|
||||
@ -65,9 +72,13 @@ static int __init ebsa285_leds_init(void)
|
||||
if (!machine_is_ebsa285())
|
||||
return -ENODEV;
|
||||
|
||||
xbus = ioremap(XBUS_CS2, SZ_4K);
|
||||
if (!xbus)
|
||||
return -ENOMEM;
|
||||
|
||||
/* 3 LEDS all off */
|
||||
hw_led_state = XBUS_LED_AMBER | XBUS_LED_GREEN | XBUS_LED_RED;
|
||||
*XBUS_LEDS = hw_led_state;
|
||||
hw_led_state = XBUS_AMBER_L | XBUS_GREEN_L | XBUS_RED_L;
|
||||
writeb(hw_led_state, xbus);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ebsa285_leds); i++) {
|
||||
struct ebsa285_led *led;
|
||||
@ -104,6 +115,7 @@ MACHINE_START(EBSA285, "EBSA285")
|
||||
.video_start = 0x000a0000,
|
||||
.video_end = 0x000bffff,
|
||||
.map_io = footbridge_map_io,
|
||||
.init_early = footbridge_sched_clock,
|
||||
.init_irq = footbridge_init_irq,
|
||||
.init_time = footbridge_timer_init,
|
||||
.restart = footbridge_restart,
|
||||
|
@ -51,11 +51,7 @@
|
||||
#define PCIMEM_SIZE 0x01000000
|
||||
#define PCIMEM_BASE MMU_IO(0xf0000000, 0x80000000)
|
||||
|
||||
#define XBUS_LEDS ((volatile unsigned char *)(XBUS_BASE + 0x12000))
|
||||
#define XBUS_LED_AMBER (1 << 0)
|
||||
#define XBUS_LED_GREEN (1 << 1)
|
||||
#define XBUS_LED_RED (1 << 2)
|
||||
#define XBUS_LED_TOGGLE (1 << 8)
|
||||
#define XBUS_CS2 0x40012000
|
||||
|
||||
#define XBUS_SWITCH ((volatile unsigned char *)(XBUS_BASE + 0x12000))
|
||||
#define XBUS_SWITCH_SWITCH ((*XBUS_SWITCH) & 15)
|
||||
|
@ -3,7 +3,6 @@ config ARCH_MXC
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select ARM_CPU_SUSPEND if PM
|
||||
select ARM_PATCH_PHYS_VIRT
|
||||
select AUTO_ZRELADDR if !ZBOOT_ROM
|
||||
select CLKSRC_MMIO
|
||||
select COMMON_CLK
|
||||
select GENERIC_ALLOCATOR
|
||||
|
@ -244,7 +244,7 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
|
||||
.virtual = OMAP4_SRAM_VA,
|
||||
.pfn = __phys_to_pfn(OMAP4_SRAM_PA),
|
||||
.length = PAGE_SIZE,
|
||||
.type = MT_MEMORY_SO,
|
||||
.type = MT_MEMORY_RW_SO,
|
||||
},
|
||||
#endif
|
||||
|
||||
@ -282,7 +282,7 @@ static struct map_desc omap54xx_io_desc[] __initdata = {
|
||||
.virtual = OMAP4_SRAM_VA,
|
||||
.pfn = __phys_to_pfn(OMAP4_SRAM_PA),
|
||||
.length = PAGE_SIZE,
|
||||
.type = MT_MEMORY_SO,
|
||||
.type = MT_MEMORY_RW_SO,
|
||||
},
|
||||
#endif
|
||||
};
|
||||
|
@ -87,7 +87,7 @@ void __init omap_barriers_init(void)
|
||||
dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
|
||||
dram_io_desc[0].pfn = __phys_to_pfn(paddr);
|
||||
dram_io_desc[0].length = size;
|
||||
dram_io_desc[0].type = MT_MEMORY_SO;
|
||||
dram_io_desc[0].type = MT_MEMORY_RW_SO;
|
||||
iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
|
||||
dram_sync = (void __iomem *) dram_io_desc[0].virtual;
|
||||
sram_sync = (void __iomem *) OMAP4_SRAM_VA;
|
||||
|
@ -75,12 +75,143 @@ void ASSABET_BCR_frob(unsigned int mask, unsigned int val)
|
||||
|
||||
EXPORT_SYMBOL(ASSABET_BCR_frob);
|
||||
|
||||
/*
|
||||
* The codec reset goes to three devices, so we need to release
|
||||
* the rest when any one of these requests it. However, that
|
||||
* causes the ADV7171 to consume around 100mA - more than half
|
||||
* the LCD-blanked power.
|
||||
*
|
||||
* With the ADV7171, LCD and backlight enabled, we go over
|
||||
* budget on the MAX846 Li-Ion charger, and if no Li-Ion battery
|
||||
* is connected, the Assabet crashes.
|
||||
*/
|
||||
#define RST_UCB1X00 (1 << 0)
|
||||
#define RST_UDA1341 (1 << 1)
|
||||
#define RST_ADV7171 (1 << 2)
|
||||
|
||||
#define SDA GPIO_GPIO(15)
|
||||
#define SCK GPIO_GPIO(18)
|
||||
#define MOD GPIO_GPIO(17)
|
||||
|
||||
static void adv7171_start(void)
|
||||
{
|
||||
GPSR = SCK;
|
||||
udelay(1);
|
||||
GPSR = SDA;
|
||||
udelay(2);
|
||||
GPCR = SDA;
|
||||
}
|
||||
|
||||
static void adv7171_stop(void)
|
||||
{
|
||||
GPSR = SCK;
|
||||
udelay(2);
|
||||
GPSR = SDA;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
static void adv7171_send(unsigned byte)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < 8; i++, byte <<= 1) {
|
||||
GPCR = SCK;
|
||||
udelay(1);
|
||||
if (byte & 0x80)
|
||||
GPSR = SDA;
|
||||
else
|
||||
GPCR = SDA;
|
||||
udelay(1);
|
||||
GPSR = SCK;
|
||||
udelay(1);
|
||||
}
|
||||
GPCR = SCK;
|
||||
udelay(1);
|
||||
GPSR = SDA;
|
||||
udelay(1);
|
||||
GPDR &= ~SDA;
|
||||
GPSR = SCK;
|
||||
udelay(1);
|
||||
if (GPLR & SDA)
|
||||
printk(KERN_WARNING "No ACK from ADV7171\n");
|
||||
udelay(1);
|
||||
GPCR = SCK | SDA;
|
||||
udelay(1);
|
||||
GPDR |= SDA;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
static void adv7171_write(unsigned reg, unsigned val)
|
||||
{
|
||||
unsigned gpdr = GPDR;
|
||||
unsigned gplr = GPLR;
|
||||
|
||||
ASSABET_BCR = BCR_value | ASSABET_BCR_AUDIO_ON;
|
||||
udelay(100);
|
||||
|
||||
GPCR = SDA | SCK | MOD; /* clear L3 mode to ensure UDA1341 doesn't respond */
|
||||
GPDR = (GPDR | SCK | MOD) & ~SDA;
|
||||
udelay(10);
|
||||
if (!(GPLR & SDA))
|
||||
printk(KERN_WARNING "Something dragging SDA down?\n");
|
||||
GPDR |= SDA;
|
||||
|
||||
adv7171_start();
|
||||
adv7171_send(0x54);
|
||||
adv7171_send(reg);
|
||||
adv7171_send(val);
|
||||
adv7171_stop();
|
||||
|
||||
/* Restore GPIO state for L3 bus */
|
||||
GPSR = gplr & (SDA | SCK | MOD);
|
||||
GPCR = (~gplr) & (SDA | SCK | MOD);
|
||||
GPDR = gpdr;
|
||||
}
|
||||
|
||||
static void adv7171_sleep(void)
|
||||
{
|
||||
/* Put the ADV7171 into sleep mode */
|
||||
adv7171_write(0x04, 0x40);
|
||||
}
|
||||
|
||||
static unsigned codec_nreset;
|
||||
|
||||
static void assabet_codec_reset(unsigned mask, int set)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool old;
|
||||
|
||||
local_irq_save(flags);
|
||||
old = !codec_nreset;
|
||||
if (set)
|
||||
codec_nreset &= ~mask;
|
||||
else
|
||||
codec_nreset |= mask;
|
||||
|
||||
if (old != !codec_nreset) {
|
||||
if (codec_nreset) {
|
||||
ASSABET_BCR_set(ASSABET_BCR_NCODEC_RST);
|
||||
adv7171_sleep();
|
||||
} else {
|
||||
ASSABET_BCR_clear(ASSABET_BCR_NCODEC_RST);
|
||||
}
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void assabet_ucb1x00_reset(enum ucb1x00_reset state)
|
||||
{
|
||||
if (state == UCB_RST_PROBE)
|
||||
ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
|
||||
int set = state == UCB_RST_REMOVE || state == UCB_RST_SUSPEND ||
|
||||
state == UCB_RST_PROBE_FAIL;
|
||||
assabet_codec_reset(RST_UCB1X00, set);
|
||||
}
|
||||
|
||||
void assabet_uda1341_reset(int set)
|
||||
{
|
||||
assabet_codec_reset(RST_UDA1341, set);
|
||||
}
|
||||
EXPORT_SYMBOL(assabet_uda1341_reset);
|
||||
|
||||
|
||||
/*
|
||||
* Assabet flash support code.
|
||||
@ -155,12 +286,9 @@ static int assabet_irda_set_power(struct device *dev, unsigned int state)
|
||||
0
|
||||
};
|
||||
|
||||
if (state < 4) {
|
||||
state = bcr_state[state];
|
||||
ASSABET_BCR_clear(state ^ (ASSABET_BCR_IRDA_MD1|
|
||||
ASSABET_BCR_IRDA_MD0));
|
||||
ASSABET_BCR_set(state);
|
||||
}
|
||||
if (state < 4)
|
||||
ASSABET_BCR_frob(ASSABET_BCR_IRDA_MD1 | ASSABET_BCR_IRDA_MD0,
|
||||
bcr_state[state]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -180,6 +308,7 @@ static struct irda_platform_data assabet_irda_data = {
|
||||
static struct ucb1x00_plat_data assabet_ucb1x00_data = {
|
||||
.reset = assabet_ucb1x00_reset,
|
||||
.gpio_base = -1,
|
||||
.can_wakeup = 1,
|
||||
};
|
||||
|
||||
static struct mcp_plat_data assabet_mcp_data = {
|
||||
|
@ -27,6 +27,8 @@
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/gpio_keys.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/pda_power.h>
|
||||
|
||||
@ -242,10 +244,43 @@ struct platform_device collie_locomo_device = {
|
||||
.resource = locomo_resources,
|
||||
};
|
||||
|
||||
static struct gpio_keys_button collie_gpio_keys[] = {
|
||||
{
|
||||
.type = EV_PWR,
|
||||
.code = KEY_RESERVED,
|
||||
.gpio = COLLIE_GPIO_ON_KEY,
|
||||
.desc = "On key",
|
||||
.wakeup = 1,
|
||||
.active_low = 1,
|
||||
},
|
||||
{
|
||||
.type = EV_PWR,
|
||||
.code = KEY_WAKEUP,
|
||||
.gpio = COLLIE_GPIO_WAKEUP,
|
||||
.desc = "Sync",
|
||||
.wakeup = 1,
|
||||
.active_low = 1,
|
||||
},
|
||||
};
|
||||
|
||||
static struct gpio_keys_platform_data collie_gpio_keys_data = {
|
||||
.buttons = collie_gpio_keys,
|
||||
.nbuttons = ARRAY_SIZE(collie_gpio_keys),
|
||||
};
|
||||
|
||||
static struct platform_device collie_gpio_keys_device = {
|
||||
.name = "gpio-keys",
|
||||
.id = -1,
|
||||
.dev = {
|
||||
.platform_data = &collie_gpio_keys_data,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device *devices[] __initdata = {
|
||||
&collie_locomo_device,
|
||||
&colliescoop_device,
|
||||
&collie_power_device,
|
||||
&collie_gpio_keys_device,
|
||||
};
|
||||
|
||||
static struct mtd_partition collie_partitions[] = {
|
||||
|
@ -28,15 +28,35 @@
|
||||
/*
|
||||
* helper for sa1100fb
|
||||
*/
|
||||
static struct gpio h3100_lcd_gpio[] = {
|
||||
{ H3100_GPIO_LCD_3V_ON, GPIOF_OUT_INIT_LOW, "LCD 3V" },
|
||||
{ H3XXX_EGPIO_LCD_ON, GPIOF_OUT_INIT_LOW, "LCD ON" },
|
||||
};
|
||||
|
||||
static bool h3100_lcd_request(void)
|
||||
{
|
||||
static bool h3100_lcd_ok;
|
||||
int rc;
|
||||
|
||||
if (h3100_lcd_ok)
|
||||
return true;
|
||||
|
||||
rc = gpio_request_array(h3100_lcd_gpio, ARRAY_SIZE(h3100_lcd_gpio));
|
||||
if (rc)
|
||||
pr_err("%s: can't request GPIOs\n", __func__);
|
||||
else
|
||||
h3100_lcd_ok = true;
|
||||
|
||||
return h3100_lcd_ok;
|
||||
}
|
||||
|
||||
static void h3100_lcd_power(int enable)
|
||||
{
|
||||
if (!gpio_request(H3XXX_EGPIO_LCD_ON, "LCD ON")) {
|
||||
gpio_set_value(H3100_GPIO_LCD_3V_ON, enable);
|
||||
gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable);
|
||||
gpio_free(H3XXX_EGPIO_LCD_ON);
|
||||
} else {
|
||||
pr_err("%s: can't request H3XXX_EGPIO_LCD_ON\n", __func__);
|
||||
}
|
||||
if (!h3100_lcd_request())
|
||||
return;
|
||||
|
||||
gpio_set_value(H3100_GPIO_LCD_3V_ON, enable);
|
||||
gpio_set_value(H3XXX_EGPIO_LCD_ON, enable);
|
||||
}
|
||||
|
||||
static struct sa1100fb_mach_info h3100_lcd_info = {
|
||||
@ -69,6 +89,11 @@ static void __init h3100_map_io(void)
|
||||
/*
|
||||
* This turns the IRDA power on or off on the Compaq H3100
|
||||
*/
|
||||
static struct gpio h3100_irda_gpio[] = {
|
||||
{ H3100_GPIO_IR_ON, GPIOF_OUT_INIT_LOW, "IrDA power" },
|
||||
{ H3100_GPIO_IR_FSEL, GPIOF_OUT_INIT_LOW, "IrDA fsel" },
|
||||
};
|
||||
|
||||
static int h3100_irda_set_power(struct device *dev, unsigned int state)
|
||||
{
|
||||
gpio_set_value(H3100_GPIO_IR_ON, state);
|
||||
@ -80,18 +105,27 @@ static void h3100_irda_set_speed(struct device *dev, unsigned int speed)
|
||||
gpio_set_value(H3100_GPIO_IR_FSEL, !(speed < 4000000));
|
||||
}
|
||||
|
||||
static int h3100_irda_startup(struct device *dev)
|
||||
{
|
||||
return gpio_request_array(h3100_irda_gpio, sizeof(h3100_irda_gpio));
|
||||
}
|
||||
|
||||
static void h3100_irda_shutdown(struct device *dev)
|
||||
{
|
||||
return gpio_free_array(h3100_irda_gpio, sizeof(h3100_irda_gpio));
|
||||
}
|
||||
|
||||
static struct irda_platform_data h3100_irda_data = {
|
||||
.set_power = h3100_irda_set_power,
|
||||
.set_speed = h3100_irda_set_speed,
|
||||
.startup = h3100_irda_startup,
|
||||
.shutdown = h3100_irda_shutdown,
|
||||
};
|
||||
|
||||
static struct gpio_default_state h3100_default_gpio[] = {
|
||||
{ H3100_GPIO_IR_ON, GPIO_MODE_OUT0, "IrDA power" },
|
||||
{ H3100_GPIO_IR_FSEL, GPIO_MODE_OUT0, "IrDA fsel" },
|
||||
{ H3XXX_GPIO_COM_DCD, GPIO_MODE_IN, "COM DCD" },
|
||||
{ H3XXX_GPIO_COM_CTS, GPIO_MODE_IN, "COM CTS" },
|
||||
{ H3XXX_GPIO_COM_RTS, GPIO_MODE_OUT0, "COM RTS" },
|
||||
{ H3100_GPIO_LCD_3V_ON, GPIO_MODE_OUT0, "LCD 3v" },
|
||||
};
|
||||
|
||||
static void __init h3100_mach_init(void)
|
||||
|
@ -28,35 +28,39 @@
|
||||
/*
|
||||
* helper for sa1100fb
|
||||
*/
|
||||
static struct gpio h3600_lcd_gpio[] = {
|
||||
{ H3XXX_EGPIO_LCD_ON, GPIOF_OUT_INIT_LOW, "LCD power" },
|
||||
{ H3600_EGPIO_LCD_PCI, GPIOF_OUT_INIT_LOW, "LCD control" },
|
||||
{ H3600_EGPIO_LCD_5V_ON, GPIOF_OUT_INIT_LOW, "LCD 5v" },
|
||||
{ H3600_EGPIO_LVDD_ON, GPIOF_OUT_INIT_LOW, "LCD 9v/-6.5v" },
|
||||
};
|
||||
|
||||
static bool h3600_lcd_request(void)
|
||||
{
|
||||
static bool h3600_lcd_ok;
|
||||
int rc;
|
||||
|
||||
if (h3600_lcd_ok)
|
||||
return true;
|
||||
|
||||
rc = gpio_request_array(h3600_lcd_gpio, ARRAY_SIZE(h3600_lcd_gpio));
|
||||
if (rc)
|
||||
pr_err("%s: can't request GPIOs\n", __func__);
|
||||
else
|
||||
h3600_lcd_ok = true;
|
||||
|
||||
return h3600_lcd_ok;
|
||||
}
|
||||
|
||||
static void h3600_lcd_power(int enable)
|
||||
{
|
||||
if (gpio_request(H3XXX_EGPIO_LCD_ON, "LCD power")) {
|
||||
pr_err("%s: can't request H3XXX_EGPIO_LCD_ON\n", __func__);
|
||||
goto err1;
|
||||
}
|
||||
if (gpio_request(H3600_EGPIO_LCD_PCI, "LCD control")) {
|
||||
pr_err("%s: can't request H3XXX_EGPIO_LCD_PCI\n", __func__);
|
||||
goto err2;
|
||||
}
|
||||
if (gpio_request(H3600_EGPIO_LCD_5V_ON, "LCD 5v")) {
|
||||
pr_err("%s: can't request H3XXX_EGPIO_LCD_5V_ON\n", __func__);
|
||||
goto err3;
|
||||
}
|
||||
if (gpio_request(H3600_EGPIO_LVDD_ON, "LCD 9v/-6.5v")) {
|
||||
pr_err("%s: can't request H3600_EGPIO_LVDD_ON\n", __func__);
|
||||
goto err4;
|
||||
}
|
||||
if (!h3600_lcd_request())
|
||||
return;
|
||||
|
||||
gpio_direction_output(H3XXX_EGPIO_LCD_ON, enable);
|
||||
gpio_direction_output(H3600_EGPIO_LCD_PCI, enable);
|
||||
gpio_direction_output(H3600_EGPIO_LCD_5V_ON, enable);
|
||||
gpio_direction_output(H3600_EGPIO_LVDD_ON, enable);
|
||||
|
||||
gpio_free(H3600_EGPIO_LVDD_ON);
|
||||
err4: gpio_free(H3600_EGPIO_LCD_5V_ON);
|
||||
err3: gpio_free(H3600_EGPIO_LCD_PCI);
|
||||
err2: gpio_free(H3XXX_EGPIO_LCD_ON);
|
||||
err1: return;
|
||||
}
|
||||
|
||||
static const struct sa1100fb_rgb h3600_rgb_16 = {
|
||||
@ -93,6 +97,11 @@ static void __init h3600_map_io(void)
|
||||
/*
|
||||
* This turns the IRDA power on or off on the Compaq H3600
|
||||
*/
|
||||
static struct gpio h3600_irda_gpio[] = {
|
||||
{ H3600_EGPIO_IR_ON, GPIOF_OUT_INIT_LOW, "IrDA power" },
|
||||
{ H3600_EGPIO_IR_FSEL, GPIOF_OUT_INIT_LOW, "IrDA fsel" },
|
||||
};
|
||||
|
||||
static int h3600_irda_set_power(struct device *dev, unsigned int state)
|
||||
{
|
||||
gpio_set_value(H3600_EGPIO_IR_ON, state);
|
||||
@ -106,29 +115,12 @@ static void h3600_irda_set_speed(struct device *dev, unsigned int speed)
|
||||
|
||||
static int h3600_irda_startup(struct device *dev)
|
||||
{
|
||||
int err = gpio_request(H3600_EGPIO_IR_ON, "IrDA power");
|
||||
if (err)
|
||||
goto err1;
|
||||
err = gpio_direction_output(H3600_EGPIO_IR_ON, 0);
|
||||
if (err)
|
||||
goto err2;
|
||||
err = gpio_request(H3600_EGPIO_IR_FSEL, "IrDA fsel");
|
||||
if (err)
|
||||
goto err2;
|
||||
err = gpio_direction_output(H3600_EGPIO_IR_FSEL, 0);
|
||||
if (err)
|
||||
goto err3;
|
||||
return 0;
|
||||
|
||||
err3: gpio_free(H3600_EGPIO_IR_FSEL);
|
||||
err2: gpio_free(H3600_EGPIO_IR_ON);
|
||||
err1: return err;
|
||||
return gpio_request_array(h3600_irda_gpio, sizeof(h3600_irda_gpio));
|
||||
}
|
||||
|
||||
static void h3600_irda_shutdown(struct device *dev)
|
||||
{
|
||||
gpio_free(H3600_EGPIO_IR_ON);
|
||||
gpio_free(H3600_EGPIO_IR_FSEL);
|
||||
return gpio_free_array(h3600_irda_gpio, sizeof(h3600_irda_gpio));
|
||||
}
|
||||
|
||||
static struct irda_platform_data h3600_irda_data = {
|
||||
|
@ -39,8 +39,8 @@ extern unsigned long SCR_value;
|
||||
|
||||
#define ASSABET_BCR_CF_PWR (1<<0) /* Compact Flash Power (1 = 3.3v, 0 = off) */
|
||||
#define ASSABET_BCR_CF_RST (1<<1) /* Compact Flash Reset (1 = power up reset) */
|
||||
#define ASSABET_BCR_GFX_RST (1<<1) /* Graphics Accelerator Reset (0 = hold reset) */
|
||||
#define ASSABET_BCR_CODEC_RST (1<<2) /* 0 = Holds UCB1300, ADI7171, and UDA1341 in reset */
|
||||
#define ASSABET_BCR_NGFX_RST (1<<1) /* Graphics Accelerator Reset (0 = hold reset) */
|
||||
#define ASSABET_BCR_NCODEC_RST (1<<2) /* 0 = Holds UCB1300, ADI7171, and UDA1341 in reset */
|
||||
#define ASSABET_BCR_IRDA_FSEL (1<<3) /* IRDA Frequency select (0 = SIR, 1 = MIR/ FIR) */
|
||||
#define ASSABET_BCR_IRDA_MD0 (1<<4) /* Range/Power select */
|
||||
#define ASSABET_BCR_IRDA_MD1 (1<<5) /* Range/Power select */
|
||||
@ -69,6 +69,8 @@ extern void ASSABET_BCR_frob(unsigned int mask, unsigned int set);
|
||||
#define ASSABET_BCR_frob(x,y) do { } while (0)
|
||||
#endif
|
||||
|
||||
extern void assabet_uda1341_reset(int set);
|
||||
|
||||
#define ASSABET_BCR_set(x) ASSABET_BCR_frob((x), (x))
|
||||
#define ASSABET_BCR_clear(x) ASSABET_BCR_frob((x), 0)
|
||||
|
||||
|
@ -43,7 +43,7 @@ extern void ux500_timer_init(void);
|
||||
.virtual = IO_ADDRESS(x), \
|
||||
.pfn = __phys_to_pfn(x), \
|
||||
.length = sz, \
|
||||
.type = MT_MEMORY, \
|
||||
.type = MT_MEMORY_RWX, \
|
||||
}
|
||||
|
||||
extern struct smp_operations ux500_smp_ops;
|
||||
|
@ -12,6 +12,7 @@ ifneq ($(CONFIG_MMU),y)
|
||||
obj-y += nommu.o
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_ARM_PTDUMP) += dump.o
|
||||
obj-$(CONFIG_MODULES) += proc-syms.o
|
||||
|
||||
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
|
||||
|
@ -25,6 +25,7 @@
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/hardware/cache-l2x0.h>
|
||||
#include "cache-tauros3.h"
|
||||
#include "cache-aurora-l2.h"
|
||||
|
||||
#define CACHE_LINE_SIZE 32
|
||||
@ -767,6 +768,14 @@ static void aurora_save(void)
|
||||
l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
|
||||
}
|
||||
|
||||
static void __init tauros3_save(void)
|
||||
{
|
||||
l2x0_saved_regs.aux2_ctrl =
|
||||
readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
|
||||
l2x0_saved_regs.prefetch_ctrl =
|
||||
readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
|
||||
}
|
||||
|
||||
static void l2x0_resume(void)
|
||||
{
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
|
||||
@ -821,6 +830,18 @@ static void aurora_resume(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void tauros3_resume(void)
|
||||
{
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
|
||||
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
|
||||
l2x0_base + TAUROS3_AUX2_CTRL);
|
||||
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
|
||||
l2x0_base + L2X0_PREFETCH_CTRL);
|
||||
}
|
||||
|
||||
l2x0_resume();
|
||||
}
|
||||
|
||||
static void __init aurora_broadcast_l2_commands(void)
|
||||
{
|
||||
__u32 u;
|
||||
@ -906,6 +927,15 @@ static const struct l2x0_of_data aurora_no_outer_data = {
|
||||
},
|
||||
};
|
||||
|
||||
static const struct l2x0_of_data tauros3_data = {
|
||||
.setup = NULL,
|
||||
.save = tauros3_save,
|
||||
/* Tauros3 broadcasts L1 cache operations to L2 */
|
||||
.outer_cache = {
|
||||
.resume = tauros3_resume,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct l2x0_of_data bcm_l2x0_data = {
|
||||
.setup = pl310_of_setup,
|
||||
.save = pl310_save,
|
||||
@ -922,17 +952,19 @@ static const struct l2x0_of_data bcm_l2x0_data = {
|
||||
};
|
||||
|
||||
static const struct of_device_id l2x0_ids[] __initconst = {
|
||||
{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
|
||||
{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
|
||||
{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
|
||||
{ .compatible = "marvell,aurora-system-cache",
|
||||
.data = (void *)&aurora_no_outer_data},
|
||||
{ .compatible = "marvell,aurora-outer-cache",
|
||||
.data = (void *)&aurora_with_outer_data},
|
||||
{ .compatible = "brcm,bcm11351-a2-pl310-cache",
|
||||
.data = (void *)&bcm_l2x0_data},
|
||||
{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
|
||||
{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
|
||||
{ .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
|
||||
.data = (void *)&bcm_l2x0_data},
|
||||
{ .compatible = "brcm,bcm11351-a2-pl310-cache",
|
||||
.data = (void *)&bcm_l2x0_data},
|
||||
{ .compatible = "marvell,aurora-outer-cache",
|
||||
.data = (void *)&aurora_with_outer_data},
|
||||
{ .compatible = "marvell,aurora-system-cache",
|
||||
.data = (void *)&aurora_no_outer_data},
|
||||
{ .compatible = "marvell,tauros3-cache",
|
||||
.data = (void *)&tauros3_data },
|
||||
{}
|
||||
};
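For readers following the device-tree side of this series, the l2x0_ids[] table above is what ties the compatible strings from the binding to the per-controller save/resume callbacks. A minimal sketch of how such a table is typically consumed is shown below; the function and local names are illustrative only, and the real l2x0_of_init() in cache-l2x0.c does considerably more (probing, aux control setup, printing the cache ID):

	/* Illustrative sketch only -- not the actual l2x0_of_init(). */
	static int __init l2x0_of_init_sketch(u32 aux_val, u32 aux_mask)
	{
		struct device_node *np;
		const struct of_device_id *match;
		const struct l2x0_of_data *data;

		np = of_find_matching_node(NULL, l2x0_ids);
		if (!np)
			return -ENODEV;

		match = of_match_node(l2x0_ids, np);
		data = match->data;	/* e.g. &tauros3_data for "marvell,tauros3-cache" */

		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
		if (data->save)
			data->save();

		outer_cache = data->outer_cache;
		return 0;
	}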
|
||||
|
||||
|
arch/arm/mm/cache-tauros3.h (new file, 41 lines)
@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Marvell Tauros3 cache controller includes
|
||||
*
|
||||
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
|
||||
*
|
||||
* based on GPL'ed 2.6 kernel sources
|
||||
* (c) Marvell International Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#ifndef __ASM_ARM_HARDWARE_TAUROS3_H
|
||||
#define __ASM_ARM_HARDWARE_TAUROS3_H
|
||||
|
||||
/*
|
||||
* Marvell Tauros3 L2CC is compatible with PL310 r0p0
|
||||
* but with PREFETCH_CTRL (r2p0) and an additional event counter.
|
||||
* Also, there is AUX2_CTRL for some Marvell specific control.
|
||||
*/
|
||||
|
||||
#define TAUROS3_EVENT_CNT2_CFG 0x224
|
||||
#define TAUROS3_EVENT_CNT2_VAL 0x228
|
||||
#define TAUROS3_INV_ALL 0x780
|
||||
#define TAUROS3_CLEAN_ALL 0x784
|
||||
#define TAUROS3_AUX2_CTRL 0x820
|
||||
|
||||
/* Registers shifts and masks */
|
||||
#define TAUROS3_AUX2_CTRL_LINEFILL_BURST8_EN (1 << 2)
|
||||
|
||||
#endif
|
@ -146,18 +146,18 @@ flush_levels:
|
||||
ldr r7, =0x7fff
|
||||
ands r7, r7, r1, lsr #13 @ extract max number of the index size
|
||||
loop1:
|
||||
mov r9, r4 @ create working copy of max way size
|
||||
mov r9, r7 @ create working copy of max index
|
||||
loop2:
|
||||
ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11
|
||||
THUMB( lsl r6, r9, r5 )
|
||||
ARM( orr r11, r10, r4, lsl r5 ) @ factor way and cache number into r11
|
||||
THUMB( lsl r6, r4, r5 )
|
||||
THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11
|
||||
ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11
|
||||
THUMB( lsl r6, r7, r2 )
|
||||
ARM( orr r11, r11, r9, lsl r2 ) @ factor index number into r11
|
||||
THUMB( lsl r6, r9, r2 )
|
||||
THUMB( orr r11, r11, r6 ) @ factor index number into r11
|
||||
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
|
||||
subs r9, r9, #1 @ decrement the way
|
||||
subs r9, r9, #1 @ decrement the index
|
||||
bge loop2
|
||||
subs r7, r7, #1 @ decrement the index
|
||||
subs r4, r4, #1 @ decrement the way
|
||||
bge loop1
|
||||
skip:
|
||||
add r10, r10, #2 @ increment cache number
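The hunk above reassigns the loop counters so that the way number (kept in r4) drives the outer loop and the set/index (r9, copied from r7) drives the inner one. As a cross-check, the resulting structure in rough C is sketched here; this is illustrative only, and the real assembly also derives the shift amounts with CLZ and carries Thumb-2 variants:

	/* Rough C rendering of the corrected set/way loop (sketch only). */
	static void clean_inv_dcache_level(unsigned int level, int max_way,
					   int max_index, unsigned int way_shift,
					   unsigned int index_shift)
	{
		int way, index;

		for (way = max_way; way >= 0; way--) {			/* outer: ways */
			for (index = max_index; index >= 0; index--) {	/* inner: sets */
				unsigned int val = (way << way_shift) |
						   (index << index_shift) |
						   (level << 1);
				/* DCCISW: clean and invalidate by set/way */
				asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
			}
		}
	}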
|
||||
|
@ -36,8 +36,8 @@
|
||||
* The context ID is used by debuggers and trace logic, and
|
||||
* should be unique within all running processes.
|
||||
*
|
||||
* In big endian operation, the two 32 bit words are swapped if accesed by
|
||||
* non 64-bit operations.
|
||||
* In big endian operation, the two 32 bit words are swapped if accessed
|
||||
* by non-64-bit operations.
|
||||
*/
|
||||
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
|
||||
#define NUM_USER_ASIDS ASID_FIRST_VERSION
|
||||
@ -78,20 +78,21 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
static void cpu_set_reserved_ttbr0(void)
|
||||
{
|
||||
/*
|
||||
* Set TTBR0 to swapper_pg_dir which contains only global entries. The
|
||||
* ASID is set to 0.
|
||||
*/
|
||||
cpu_set_ttbr(0, __pa(swapper_pg_dir));
|
||||
isb();
|
||||
}
|
||||
/*
|
||||
* With LPAE, the ASID and page tables are updated atomicly, so there is
|
||||
* no need for a reserved set of tables (the active ASID tracking prevents
|
||||
* any issues across a rollover).
|
||||
*/
|
||||
#define cpu_set_reserved_ttbr0()
|
||||
#else
|
||||
static void cpu_set_reserved_ttbr0(void)
|
||||
{
|
||||
u32 ttb;
|
||||
/* Copy TTBR1 into TTBR0 */
|
||||
/*
|
||||
* Copy TTBR1 into TTBR0.
|
||||
* This points at swapper_pg_dir, which contains only global
|
||||
* entries so any speculative walks are perfectly safe.
|
||||
*/
|
||||
asm volatile(
|
||||
" mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
|
||||
" mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"
|
||||
@ -179,6 +180,7 @@ static int is_reserved_asid(u64 asid)
|
||||
|
||||
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
{
|
||||
static u32 cur_idx = 1;
|
||||
u64 asid = atomic64_read(&mm->context.id);
|
||||
u64 generation = atomic64_read(&asid_generation);
|
||||
|
||||
@ -193,10 +195,13 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
* Allocate a free ASID. If we can't find one, take a
|
||||
* note of the currently active ASIDs and mark the TLBs
|
||||
* as requiring flushes. We always count from ASID #1,
|
||||
* as we reserve ASID #0 to switch via TTBR0 and indicate
|
||||
* rollover events.
|
||||
* as we reserve ASID #0 to switch via TTBR0 and to
|
||||
* avoid speculative page table walks from hitting in
|
||||
* any partial walk caches, which could be populated
|
||||
* from overlapping level-1 descriptors used to map both
|
||||
* the module area and the userspace stack.
|
||||
*/
|
||||
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
|
||||
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
|
||||
if (asid == NUM_USER_ASIDS) {
|
||||
generation = atomic64_add_return(ASID_FIRST_VERSION,
|
||||
&asid_generation);
|
||||
@ -204,6 +209,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
|
||||
}
|
||||
__set_bit(asid, asid_map);
|
||||
cur_idx = asid;
|
||||
asid |= generation;
|
||||
cpumask_clear(mm_cpumask(mm));
|
||||
}
|
||||
@ -221,8 +227,9 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
|
||||
__check_vmalloc_seq(mm);
|
||||
|
||||
/*
|
||||
* Required during context switch to avoid speculative page table
|
||||
* walking with the wrong TTBR.
|
||||
* We cannot update the pgd and the ASID atomicly with classic
|
||||
* MMU, so switch exclusively to global mappings to avoid
|
||||
* speculative page table walking with the wrong TTBR.
|
||||
*/
|
||||
cpu_set_reserved_ttbr0();
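As a worked example of the allocator above (assuming ASID_BITS is 8, as on typical ARMv7 cores): NUM_USER_ASIDS and ASID_FIRST_VERSION are both 0x100, so bits [7:0] of mm->context.id carry the hardware ASID and the upper bits carry the generation. Before the cur_idx change, every new_context() call rescanned the bitmap from bit 1 and walked over every ASID already handed out in the current generation; resuming the search from the last allocation point makes each allocation roughly constant time, and the full rescan, generation bump and TLB flush only happen once the bitmap is genuinely exhausted.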
|
||||
|
||||
|
@ -376,7 +376,7 @@ void __init init_dma_coherent_pool_size(unsigned long size)
|
||||
static int __init atomic_pool_init(void)
|
||||
{
|
||||
struct dma_pool *pool = &atomic_pool;
|
||||
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
|
||||
pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
|
||||
gfp_t gfp = GFP_KERNEL | GFP_DMA;
|
||||
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
|
||||
unsigned long *bitmap;
|
||||
@ -624,7 +624,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
|
||||
if (PageHighMem(page))
|
||||
__dma_free_remap(cpu_addr, size);
|
||||
else
|
||||
__dma_remap(page, size, pgprot_kernel);
|
||||
__dma_remap(page, size, PAGE_KERNEL);
|
||||
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
@ -1351,7 +1351,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
|
||||
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
||||
{
|
||||
pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
|
||||
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
|
||||
struct page **pages;
|
||||
void *addr = NULL;
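The pgprot_kernel to PAGE_KERNEL substitutions in this file are not purely cosmetic: on ARM, PAGE_KERNEL is pgprot_kernel with L_PTE_XN added, so the atomic pool, CMA remaps and IOMMU allocations above all lose execute permission, in line with the non-executable kernel mapping changes elsewhere in this branch.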
|
||||
|
||||
|
arch/arm/mm/dump.c (new file, 345 lines)
@ -0,0 +1,345 @@
|
||||
/*
|
||||
* Debug helper to dump the current kernel pagetables of the system
|
||||
* so that we can see what the various memory ranges are set to.
|
||||
*
|
||||
* Derived from x86 implementation:
|
||||
* (C) Copyright 2008 Intel Corporation
|
||||
*
|
||||
* Author: Arjan van de Ven <arjan@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; version 2
|
||||
* of the License.
|
||||
*/
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
struct addr_marker {
|
||||
unsigned long start_address;
|
||||
const char *name;
|
||||
};
|
||||
|
||||
static struct addr_marker address_markers[] = {
|
||||
{ MODULES_VADDR, "Modules" },
|
||||
{ PAGE_OFFSET, "Kernel Mapping" },
|
||||
{ 0, "vmalloc() Area" },
|
||||
{ VMALLOC_END, "vmalloc() End" },
|
||||
{ FIXADDR_START, "Fixmap Area" },
|
||||
{ CONFIG_VECTORS_BASE, "Vectors" },
|
||||
{ CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
|
||||
{ -1, NULL },
|
||||
};
|
||||
|
||||
struct pg_state {
|
||||
struct seq_file *seq;
|
||||
const struct addr_marker *marker;
|
||||
unsigned long start_address;
|
||||
unsigned level;
|
||||
u64 current_prot;
|
||||
};
|
||||
|
||||
struct prot_bits {
|
||||
u64 mask;
|
||||
u64 val;
|
||||
const char *set;
|
||||
const char *clear;
|
||||
};
|
||||
|
||||
static const struct prot_bits pte_bits[] = {
|
||||
{
|
||||
.mask = L_PTE_USER,
|
||||
.val = L_PTE_USER,
|
||||
.set = "USR",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = L_PTE_RDONLY,
|
||||
.val = L_PTE_RDONLY,
|
||||
.set = "ro",
|
||||
.clear = "RW",
|
||||
}, {
|
||||
.mask = L_PTE_XN,
|
||||
.val = L_PTE_XN,
|
||||
.set = "NX",
|
||||
.clear = "x ",
|
||||
}, {
|
||||
.mask = L_PTE_SHARED,
|
||||
.val = L_PTE_SHARED,
|
||||
.set = "SHD",
|
||||
.clear = " ",
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_UNCACHED,
|
||||
.set = "SO/UNCACHED",
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_BUFFERABLE,
|
||||
.set = "MEM/BUFFERABLE/WC",
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_WRITETHROUGH,
|
||||
.set = "MEM/CACHED/WT",
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_WRITEBACK,
|
||||
.set = "MEM/CACHED/WBRA",
|
||||
#ifndef CONFIG_ARM_LPAE
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_MINICACHE,
|
||||
.set = "MEM/MINICACHE",
|
||||
#endif
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_WRITEALLOC,
|
||||
.set = "MEM/CACHED/WBWA",
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_DEV_SHARED,
|
||||
.set = "DEV/SHARED",
|
||||
#ifndef CONFIG_ARM_LPAE
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_DEV_NONSHARED,
|
||||
.set = "DEV/NONSHARED",
|
||||
#endif
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_DEV_WC,
|
||||
.set = "DEV/WC",
|
||||
}, {
|
||||
.mask = L_PTE_MT_MASK,
|
||||
.val = L_PTE_MT_DEV_CACHED,
|
||||
.set = "DEV/CACHED",
|
||||
},
|
||||
};
|
||||
|
||||
static const struct prot_bits section_bits[] = {
|
||||
#ifndef CONFIG_ARM_LPAE
|
||||
/* These are approximate */
|
||||
{
|
||||
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
|
||||
.val = 0,
|
||||
.set = " ro",
|
||||
}, {
|
||||
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
|
||||
.val = PMD_SECT_AP_WRITE,
|
||||
.set = " RW",
|
||||
}, {
|
||||
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
|
||||
.val = PMD_SECT_AP_READ,
|
||||
.set = "USR ro",
|
||||
}, {
|
||||
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
|
||||
.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
|
||||
.set = "USR RW",
|
||||
#else
|
||||
{
|
||||
.mask = PMD_SECT_USER,
|
||||
.val = PMD_SECT_USER,
|
||||
.set = "USR",
|
||||
}, {
|
||||
.mask = PMD_SECT_RDONLY,
|
||||
.val = PMD_SECT_RDONLY,
|
||||
.set = "ro",
|
||||
.clear = "RW",
|
||||
#endif
|
||||
}, {
|
||||
.mask = PMD_SECT_XN,
|
||||
.val = PMD_SECT_XN,
|
||||
.set = "NX",
|
||||
.clear = "x ",
|
||||
}, {
|
||||
.mask = PMD_SECT_S,
|
||||
.val = PMD_SECT_S,
|
||||
.set = "SHD",
|
||||
.clear = " ",
|
||||
},
|
||||
};
|
||||
|
||||
struct pg_level {
|
||||
const struct prot_bits *bits;
|
||||
size_t num;
|
||||
u64 mask;
|
||||
};
|
||||
|
||||
static struct pg_level pg_level[] = {
|
||||
{
|
||||
}, { /* pgd */
|
||||
}, { /* pud */
|
||||
}, { /* pmd */
|
||||
.bits = section_bits,
|
||||
.num = ARRAY_SIZE(section_bits),
|
||||
}, { /* pte */
|
||||
.bits = pte_bits,
|
||||
.num = ARRAY_SIZE(pte_bits),
|
||||
},
|
||||
};
|
||||
|
||||
static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < num; i++, bits++) {
|
||||
const char *s;
|
||||
|
||||
if ((st->current_prot & bits->mask) == bits->val)
|
||||
s = bits->set;
|
||||
else
|
||||
s = bits->clear;
|
||||
|
||||
if (s)
|
||||
seq_printf(st->seq, " %s", s);
|
||||
}
|
||||
}
|
||||
|
||||
static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val)
|
||||
{
|
||||
static const char units[] = "KMGTPE";
|
||||
u64 prot = val & pg_level[level].mask;
|
||||
|
||||
if (addr < USER_PGTABLES_CEILING)
|
||||
return;
|
||||
|
||||
if (!st->level) {
|
||||
st->level = level;
|
||||
st->current_prot = prot;
|
||||
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
|
||||
} else if (prot != st->current_prot || level != st->level ||
|
||||
addr >= st->marker[1].start_address) {
|
||||
const char *unit = units;
|
||||
unsigned long delta;
|
||||
|
||||
if (st->current_prot) {
|
||||
seq_printf(st->seq, "0x%08lx-0x%08lx ",
|
||||
st->start_address, addr);
|
||||
|
||||
delta = (addr - st->start_address) >> 10;
|
||||
while (!(delta & 1023) && unit[1]) {
|
||||
delta >>= 10;
|
||||
unit++;
|
||||
}
|
||||
seq_printf(st->seq, "%9lu%c", delta, *unit);
|
||||
if (pg_level[st->level].bits)
|
||||
dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
|
||||
seq_printf(st->seq, "\n");
|
||||
}
|
||||
|
||||
if (addr >= st->marker[1].start_address) {
|
||||
st->marker++;
|
||||
seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
|
||||
}
|
||||
st->start_address = addr;
|
||||
st->current_prot = prot;
|
||||
st->level = level;
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
|
||||
{
|
||||
pte_t *pte = pte_offset_kernel(pmd, 0);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
|
||||
addr = start + i * PAGE_SIZE;
|
||||
note_page(st, addr, 4, pte_val(*pte));
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
|
||||
{
|
||||
pmd_t *pmd = pmd_offset(pud, 0);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
|
||||
addr = start + i * PMD_SIZE;
|
||||
if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
|
||||
note_page(st, addr, 3, pmd_val(*pmd));
|
||||
else
|
||||
walk_pte(st, pmd, addr);
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
|
||||
{
|
||||
pud_t *pud = pud_offset(pgd, 0);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
|
||||
addr = start + i * PUD_SIZE;
|
||||
if (!pud_none(*pud)) {
|
||||
walk_pmd(st, pud, addr);
|
||||
} else {
|
||||
note_page(st, addr, 2, pud_val(*pud));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void walk_pgd(struct seq_file *m)
|
||||
{
|
||||
pgd_t *pgd = swapper_pg_dir;
|
||||
struct pg_state st;
|
||||
unsigned long addr;
|
||||
unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
|
||||
|
||||
memset(&st, 0, sizeof(st));
|
||||
st.seq = m;
|
||||
st.marker = address_markers;
|
||||
|
||||
pgd += pgdoff;
|
||||
|
||||
for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
|
||||
addr = i * PGDIR_SIZE;
|
||||
if (!pgd_none(*pgd)) {
|
||||
walk_pud(&st, pgd, addr);
|
||||
} else {
|
||||
note_page(&st, addr, 1, pgd_val(*pgd));
|
||||
}
|
||||
}
|
||||
|
||||
note_page(&st, 0, 0, 0);
|
||||
}
|
||||
|
||||
static int ptdump_show(struct seq_file *m, void *v)
|
||||
{
|
||||
walk_pgd(m);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ptdump_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, ptdump_show, NULL);
|
||||
}
|
||||
|
||||
static const struct file_operations ptdump_fops = {
|
||||
.open = ptdump_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int ptdump_init(void)
|
||||
{
|
||||
struct dentry *pe;
|
||||
unsigned i, j;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
|
||||
if (pg_level[i].bits)
|
||||
for (j = 0; j < pg_level[i].num; j++)
|
||||
pg_level[i].mask |= pg_level[i].bits[j].mask;
|
||||
|
||||
address_markers[2].start_address = VMALLOC_START;
|
||||
|
||||
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
|
||||
&ptdump_fops);
|
||||
return pe ? 0 : -ENOMEM;
|
||||
}
|
||||
__initcall(ptdump_init);
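With CONFIG_ARM_PTDUMP enabled (see the Makefile hunk earlier), the table is read back at runtime through debugfs: mount debugfs (mount -t debugfs none /sys/kernel/debug) and read /sys/kernel/debug/kernel_page_tables. Each collapsed range prints as start-end, a size with a K/M/G suffix, then the decoded bits from the tables above; an illustrative (made-up) line for a module data page would look roughly like "0xbf000000-0xbf001000 4K RW NX SHD MEM/CACHED/WBWA".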
|
@ -142,58 +142,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
|
||||
*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
|
||||
}
|
||||
|
||||
static void __init arm_bootmem_init(unsigned long start_pfn,
|
||||
unsigned long end_pfn)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
unsigned int boot_pages;
|
||||
phys_addr_t bitmap;
|
||||
pg_data_t *pgdat;
|
||||
|
||||
/*
|
||||
* Allocate the bootmem bitmap page. This must be in a region
|
||||
* of memory which has already been mapped.
|
||||
*/
|
||||
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
|
||||
bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
|
||||
__pfn_to_phys(end_pfn));
|
||||
|
||||
/*
|
||||
* Initialise the bootmem allocator, handing the
|
||||
* memory banks over to bootmem.
|
||||
*/
|
||||
node_set_online(0);
|
||||
pgdat = NODE_DATA(0);
|
||||
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
|
||||
|
||||
/* Free the lowmem regions from memblock into bootmem. */
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned long start = memblock_region_memory_base_pfn(reg);
|
||||
unsigned long end = memblock_region_memory_end_pfn(reg);
|
||||
|
||||
if (end >= end_pfn)
|
||||
end = end_pfn;
|
||||
if (start >= end)
|
||||
break;
|
||||
|
||||
free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
/* Reserve the lowmem memblock reserved regions in bootmem. */
|
||||
for_each_memblock(reserved, reg) {
|
||||
unsigned long start = memblock_region_reserved_base_pfn(reg);
|
||||
unsigned long end = memblock_region_reserved_end_pfn(reg);
|
||||
|
||||
if (end >= end_pfn)
|
||||
end = end_pfn;
|
||||
if (start >= end)
|
||||
break;
|
||||
|
||||
reserve_bootmem(__pfn_to_phys(start),
|
||||
(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
|
||||
}
|
||||
}
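The deleted helper above existed only to replay memblock's memory and reserved regions into the bootmem allocator. With this series ARM moves to NO_BOOTMEM, where the page allocator's free lists are seeded directly from memblock (mm/nobootmem.c), so there is no bootmem bitmap to place and no second allocator to populate.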
|
||||
|
||||
#ifdef CONFIG_ZONE_DMA
|
||||
|
||||
phys_addr_t arm_dma_zone_size __read_mostly;
|
||||
@ -233,7 +181,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
|
||||
#endif
|
||||
}
|
||||
|
||||
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
|
||||
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
|
||||
unsigned long max_high)
|
||||
{
|
||||
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
|
||||
@ -381,7 +329,6 @@ void __init arm_memblock_init(struct meminfo *mi,
|
||||
dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
|
||||
|
||||
arm_memblock_steal_permitted = false;
|
||||
memblock_allow_resize();
|
||||
memblock_dump_all();
|
||||
}
|
||||
|
||||
@ -389,12 +336,11 @@ void __init bootmem_init(void)
|
||||
{
|
||||
unsigned long min, max_low, max_high;
|
||||
|
||||
memblock_allow_resize();
|
||||
max_low = max_high = 0;
|
||||
|
||||
find_limits(&min, &max_low, &max_high);
|
||||
|
||||
arm_bootmem_init(min, max_low);
|
||||
|
||||
/*
|
||||
* Sparsemem tries to allocate bootmem in memory_present(),
|
||||
* so must be done after the fixed reservations
|
||||
@ -411,7 +357,7 @@ void __init bootmem_init(void)
|
||||
* the sparse mem_map arrays initialized by sparse_init()
|
||||
* for memmap_init_zone(), otherwise all PFNs are invalid.
|
||||
*/
|
||||
arm_bootmem_free(min, max_low, max_high);
|
||||
zone_sizes_init(min, max_low, max_high);
|
||||
|
||||
/*
|
||||
* This doesn't seem to be used by the Linux memory manager any
|
||||
@ -584,7 +530,7 @@ void __init mem_init(void)
|
||||
extern u32 itcm_end;
|
||||
#endif
|
||||
|
||||
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
|
||||
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
|
||||
|
||||
/* this will put all unused low memory onto the freelists */
|
||||
free_unused_memmap(&meminfo);
|
||||
|
@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
|
||||
unsigned int mtype;
|
||||
|
||||
if (cached)
|
||||
mtype = MT_MEMORY;
|
||||
mtype = MT_MEMORY_RWX;
|
||||
else
|
||||
mtype = MT_MEMORY_NONCACHED;
|
||||
mtype = MT_MEMORY_RWX_NONCACHED;
|
||||
|
||||
return __arm_ioremap_caller(phys_addr, size, mtype,
|
||||
__builtin_return_address(0));
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/cachetype.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/tlb.h>
|
||||
@ -287,36 +288,43 @@ static struct mem_type mem_types[] = {
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.domain = DOMAIN_USER,
|
||||
},
|
||||
[MT_MEMORY] = {
|
||||
[MT_MEMORY_RWX] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_MEMORY_RW] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
|
||||
L_PTE_XN,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_ROM] = {
|
||||
.prot_sect = PMD_TYPE_SECT,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_MEMORY_NONCACHED] = {
|
||||
[MT_MEMORY_RWX_NONCACHED] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
|
||||
L_PTE_MT_BUFFERABLE,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_MEMORY_DTCM] = {
|
||||
[MT_MEMORY_RW_DTCM] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
|
||||
L_PTE_XN,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_MEMORY_ITCM] = {
|
||||
[MT_MEMORY_RWX_ITCM] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_MEMORY_SO] = {
|
||||
[MT_MEMORY_RW_SO] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
|
||||
L_PTE_MT_UNCACHED | L_PTE_XN,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
@ -325,7 +333,8 @@ static struct mem_type mem_types[] = {
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
[MT_MEMORY_DMA_READY] = {
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
|
||||
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
|
||||
L_PTE_XN,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.domain = DOMAIN_KERNEL,
|
||||
},
|
||||
@ -337,6 +346,44 @@ const struct mem_type *get_mem_type(unsigned int type)
|
||||
}
|
||||
EXPORT_SYMBOL(get_mem_type);
|
||||
|
||||
#define PTE_SET_FN(_name, pteop) \
|
||||
static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
|
||||
void *data) \
|
||||
{ \
|
||||
pte_t pte = pteop(*ptep); \
|
||||
\
|
||||
set_pte_ext(ptep, pte, 0); \
|
||||
return 0; \
|
||||
} \
|
||||
|
||||
#define SET_MEMORY_FN(_name, callback) \
|
||||
int set_memory_##_name(unsigned long addr, int numpages) \
|
||||
{ \
|
||||
unsigned long start = addr; \
|
||||
unsigned long size = PAGE_SIZE*numpages; \
|
||||
unsigned end = start + size; \
|
||||
\
|
||||
if (start < MODULES_VADDR || start >= MODULES_END) \
|
||||
return -EINVAL;\
|
||||
\
|
||||
if (end < MODULES_VADDR || end >= MODULES_END) \
|
||||
return -EINVAL; \
|
||||
\
|
||||
apply_to_page_range(&init_mm, start, size, callback, NULL); \
|
||||
flush_tlb_kernel_range(start, end); \
|
||||
return 0;\
|
||||
}
|
||||
|
||||
PTE_SET_FN(ro, pte_wrprotect)
|
||||
PTE_SET_FN(rw, pte_mkwrite)
|
||||
PTE_SET_FN(x, pte_mkexec)
|
||||
PTE_SET_FN(nx, pte_mknexec)
|
||||
|
||||
SET_MEMORY_FN(ro, pte_set_ro)
|
||||
SET_MEMORY_FN(rw, pte_set_rw)
|
||||
SET_MEMORY_FN(x, pte_set_x)
|
||||
SET_MEMORY_FN(nx, pte_set_nx)
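The generated set_memory_ro/rw/x/nx() helpers only accept addresses inside the module area (MODULES_VADDR..MODULES_END) and otherwise return -EINVAL. A minimal sketch of how a caller such as the module loader could use them follows; the function and parameter names here are invented for illustration and are not part of this diff:

	/* Illustrative sketch: protecting a loaded module's sections. */
	static void protect_module_sections(unsigned long text, unsigned long text_size,
					    unsigned long data, unsigned long data_size)
	{
		/* Module text: read-only but executable. */
		set_memory_ro(text, text_size >> PAGE_SHIFT);
		set_memory_x(text, text_size >> PAGE_SHIFT);

		/* Module data: writable, never executable. */
		set_memory_rw(data, data_size >> PAGE_SHIFT);
		set_memory_nx(data, data_size >> PAGE_SHIFT);
	}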
|
||||
|
||||
/*
|
||||
* Adjust the PMD section entries according to the CPU in use.
|
||||
*/
|
||||
@ -410,6 +457,9 @@ static void __init build_mem_type_table(void)
|
||||
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
|
||||
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
|
||||
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
|
||||
|
||||
/* Also setup NX memory mapping */
|
||||
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
|
||||
}
|
||||
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
|
||||
/*
|
||||
@ -487,11 +537,13 @@ static void __init build_mem_type_table(void)
|
||||
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
|
||||
}
|
||||
}
|
||||
|
||||
@ -502,15 +554,15 @@ static void __init build_mem_type_table(void)
|
||||
if (cpu_arch >= CPU_ARCH_ARMv6) {
|
||||
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
|
||||
/* Non-cacheable Normal is XCB = 001 */
|
||||
mem_types[MT_MEMORY_NONCACHED].prot_sect |=
|
||||
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
|
||||
PMD_SECT_BUFFERED;
|
||||
} else {
|
||||
/* For both ARMv6 and non-TEX-remapping ARMv7 */
|
||||
mem_types[MT_MEMORY_NONCACHED].prot_sect |=
|
||||
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
|
||||
PMD_SECT_TEX(1);
|
||||
}
|
||||
} else {
|
||||
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
|
||||
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
@ -543,10 +595,12 @@ static void __init build_mem_type_table(void)
|
||||
|
||||
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
|
||||
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
|
||||
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
|
||||
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
|
||||
mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
|
||||
mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
|
||||
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
|
||||
mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
|
||||
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
|
||||
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
|
||||
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
|
||||
mem_types[MT_ROM].prot_sect |= cp->pmd;
|
||||
|
||||
switch (cp->pmd) {
|
||||
@ -1296,6 +1350,8 @@ static void __init kmap_init(void)
|
||||
static void __init map_lowmem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
|
||||
unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
|
||||
|
||||
/* Map all the lowmem memory banks. */
|
||||
for_each_memblock(memory, reg) {
|
||||
@ -1308,12 +1364,40 @@ static void __init map_lowmem(void)
|
||||
if (start >= end)
|
||||
break;
|
||||
|
||||
map.pfn = __phys_to_pfn(start);
|
||||
map.virtual = __phys_to_virt(start);
|
||||
map.length = end - start;
|
||||
map.type = MT_MEMORY;
|
||||
if (end < kernel_x_start || start >= kernel_x_end) {
|
||||
map.pfn = __phys_to_pfn(start);
|
||||
map.virtual = __phys_to_virt(start);
|
||||
map.length = end - start;
|
||||
map.type = MT_MEMORY_RWX;
|
||||
|
||||
create_mapping(&map);
|
||||
create_mapping(&map);
|
||||
} else {
|
||||
/* This better cover the entire kernel */
|
||||
if (start < kernel_x_start) {
|
||||
map.pfn = __phys_to_pfn(start);
|
||||
map.virtual = __phys_to_virt(start);
|
||||
map.length = kernel_x_start - start;
|
||||
map.type = MT_MEMORY_RW;
|
||||
|
||||
create_mapping(&map);
|
||||
}
|
||||
|
||||
map.pfn = __phys_to_pfn(kernel_x_start);
|
||||
map.virtual = __phys_to_virt(kernel_x_start);
|
||||
map.length = kernel_x_end - kernel_x_start;
|
||||
map.type = MT_MEMORY_RWX;
|
||||
|
||||
create_mapping(&map);
|
||||
|
||||
if (kernel_x_end < end) {
|
||||
map.pfn = __phys_to_pfn(kernel_x_end);
|
||||
map.virtual = __phys_to_virt(kernel_x_end);
|
||||
map.length = end - kernel_x_end;
|
||||
map.type = MT_MEMORY_RW;
|
||||
|
||||
create_mapping(&map);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
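A concrete illustration of the new lowmem split (addresses are only an example): with a single 512MB bank at physical 0x80000000 and a kernel whose init section ends at 0x807f0000, kernel_x_start rounds down to 0x80000000 and kernel_x_end rounds up to 0x80800000, so the loop emits one MT_MEMORY_RWX mapping for 0x80000000-0x80800000 covering the kernel image and one non-executable MT_MEMORY_RW mapping for 0x80800000-0xa0000000 covering the rest of the bank. Banks that do not overlap the kernel image at all are still mapped MT_MEMORY_RWX in a single step, as before.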
|
||||
|
||||
|
@ -23,7 +23,7 @@
#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2)
#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
#endif

@ -83,162 +83,6 @@ static struct device_attribute amba_dev_attrs[] = {
|
||||
__ATTR_NULL,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
||||
static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
|
||||
{
|
||||
struct amba_driver *adrv = to_amba_driver(dev->driver);
|
||||
struct amba_device *adev = to_amba_device(dev);
|
||||
int ret = 0;
|
||||
|
||||
if (dev->driver && adrv->suspend)
|
||||
ret = adrv->suspend(adev, mesg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_legacy_resume(struct device *dev)
|
||||
{
|
||||
struct amba_driver *adrv = to_amba_driver(dev->driver);
|
||||
struct amba_device *adev = to_amba_device(dev);
|
||||
int ret = 0;
|
||||
|
||||
if (dev->driver && adrv->resume)
|
||||
ret = adrv->resume(adev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
|
||||
static int amba_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->suspend)
|
||||
ret = drv->pm->suspend(dev);
|
||||
} else {
|
||||
ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_resume(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->resume)
|
||||
ret = drv->pm->resume(dev);
|
||||
} else {
|
||||
ret = amba_legacy_resume(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SUSPEND */
|
||||
|
||||
#define amba_pm_suspend NULL
|
||||
#define amba_pm_resume NULL
|
||||
|
||||
#endif /* !CONFIG_SUSPEND */
|
||||
|
||||
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
||||
|
||||
static int amba_pm_freeze(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->freeze)
|
||||
ret = drv->pm->freeze(dev);
|
||||
} else {
|
||||
ret = amba_legacy_suspend(dev, PMSG_FREEZE);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_thaw(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->thaw)
|
||||
ret = drv->pm->thaw(dev);
|
||||
} else {
|
||||
ret = amba_legacy_resume(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_poweroff(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->poweroff)
|
||||
ret = drv->pm->poweroff(dev);
|
||||
} else {
|
||||
ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_restore(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->restore)
|
||||
ret = drv->pm->restore(dev);
|
||||
} else {
|
||||
ret = amba_legacy_resume(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
#define amba_pm_freeze NULL
|
||||
#define amba_pm_thaw NULL
|
||||
#define amba_pm_poweroff NULL
|
||||
#define amba_pm_restore NULL
|
||||
|
||||
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
/*
|
||||
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
|
||||
@ -251,7 +95,7 @@ static int amba_pm_runtime_suspend(struct device *dev)
|
||||
int ret = pm_generic_runtime_suspend(dev);
|
||||
|
||||
if (ret == 0 && dev->driver)
|
||||
clk_disable(pcdev->pclk);
|
||||
clk_disable_unprepare(pcdev->pclk);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -262,7 +106,7 @@ static int amba_pm_runtime_resume(struct device *dev)
|
||||
int ret;
|
||||
|
||||
if (dev->driver) {
|
||||
ret = clk_enable(pcdev->pclk);
|
||||
ret = clk_prepare_enable(pcdev->pclk);
|
||||
/* Failure is probably fatal to the system, but... */
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -272,15 +116,13 @@ static int amba_pm_runtime_resume(struct device *dev)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static const struct dev_pm_ops amba_pm = {
|
||||
.suspend = amba_pm_suspend,
|
||||
.resume = amba_pm_resume,
|
||||
.freeze = amba_pm_freeze,
|
||||
.thaw = amba_pm_thaw,
|
||||
.poweroff = amba_pm_poweroff,
|
||||
.restore = amba_pm_restore,
|
||||
.suspend = pm_generic_suspend,
|
||||
.resume = pm_generic_resume,
|
||||
.freeze = pm_generic_freeze,
|
||||
.thaw = pm_generic_thaw,
|
||||
.poweroff = pm_generic_poweroff,
|
||||
.restore = pm_generic_restore,
|
||||
SET_RUNTIME_PM_OPS(
|
||||
amba_pm_runtime_suspend,
|
||||
amba_pm_runtime_resume,
|
||||
@ -288,14 +130,6 @@ static const struct dev_pm_ops amba_pm = {
|
||||
)
|
||||
};
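Pointing the system sleep entries at the pm_generic_* helpers is what allows the block of amba_legacy_* and amba_pm_* wrappers deleted above to go: the generic callbacks (drivers/base/power/generic_ops.c) simply invoke the matching dev_pm_ops method of the bound driver, which is all the hand-rolled versions did once the legacy amba_driver suspend/resume fallback path stopped being used.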
|
||||
|
||||
#define AMBA_PM (&amba_pm)
|
||||
|
||||
#else /* !CONFIG_PM */
|
||||
|
||||
#define AMBA_PM NULL
|
||||
|
||||
#endif /* !CONFIG_PM */
|
||||
|
||||
/*
|
||||
* Primecells are part of the Advanced Microcontroller Bus Architecture,
|
||||
* so we call the bus "amba".
|
||||
@ -305,7 +139,7 @@ struct bus_type amba_bustype = {
|
||||
.dev_attrs = amba_dev_attrs,
|
||||
.match = amba_match,
|
||||
.uevent = amba_uevent,
|
||||
.pm = AMBA_PM,
|
||||
.pm = &amba_pm,
|
||||
};
|
||||
|
||||
static int __init amba_init(void)
|
||||
@ -317,36 +151,23 @@ postcore_initcall(amba_init);
|
||||
|
||||
static int amba_get_enable_pclk(struct amba_device *pcdev)
|
||||
{
|
||||
struct clk *pclk = clk_get(&pcdev->dev, "apb_pclk");
|
||||
int ret;
|
||||
|
||||
pcdev->pclk = pclk;
|
||||
pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
|
||||
if (IS_ERR(pcdev->pclk))
|
||||
return PTR_ERR(pcdev->pclk);
|
||||
|
||||
if (IS_ERR(pclk))
|
||||
return PTR_ERR(pclk);
|
||||
|
||||
ret = clk_prepare(pclk);
|
||||
if (ret) {
|
||||
clk_put(pclk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = clk_enable(pclk);
|
||||
if (ret) {
|
||||
clk_unprepare(pclk);
|
||||
clk_put(pclk);
|
||||
}
|
||||
ret = clk_prepare_enable(pcdev->pclk);
|
||||
if (ret)
|
||||
clk_put(pcdev->pclk);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void amba_put_disable_pclk(struct amba_device *pcdev)
|
||||
{
|
||||
struct clk *pclk = pcdev->pclk;
|
||||
|
||||
clk_disable(pclk);
|
||||
clk_unprepare(pclk);
|
||||
clk_put(pclk);
|
||||
clk_disable_unprepare(pcdev->pclk);
|
||||
clk_put(pcdev->pclk);
|
||||
}
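For reference, clk_prepare_enable() and clk_disable_unprepare() are the <linux/clk.h> helpers that fold the two-step sequences used by the deleted code into single calls. The sketch below (not part of this diff) shows what the enable side expands to:

	/* Equivalent of clk_prepare_enable(pcdev->pclk) -- sketch only. */
	static int pclk_on(struct amba_device *pcdev)
	{
		int ret = clk_prepare(pcdev->pclk);	/* may sleep */

		if (ret)
			return ret;
		ret = clk_enable(pcdev->pclk);		/* safe in atomic context */
		if (ret)
			clk_unprepare(pcdev->pclk);
		return ret;
	}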
|
||||
|
||||
/*
|
||||
|
@ -167,8 +167,6 @@ static int amba_kmi_remove(struct amba_device *dev)
|
||||
{
|
||||
struct amba_kmi_port *kmi = amba_get_drvdata(dev);
|
||||
|
||||
amba_set_drvdata(dev, NULL);
|
||||
|
||||
serio_unregister_port(kmi->io);
|
||||
clk_put(kmi->clk);
|
||||
iounmap(kmi->base);
|
||||
|
@ -1683,8 +1683,6 @@ static int mmci_remove(struct amba_device *dev)
|
||||
{
|
||||
struct mmc_host *mmc = amba_get_drvdata(dev);
|
||||
|
||||
amba_set_drvdata(dev, NULL);
|
||||
|
||||
if (mmc) {
|
||||
struct mmci_host *host = mmc_priv(mmc);