Disintegrate and delete asm/system.h
Merge tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system

Pull "Disintegrate and delete asm/system.h" from David Howells:
 "Here are a bunch of patches to disintegrate asm/system.h into a set of
  separate bits to relieve the problem of circular inclusion dependencies.
  I've built all the working defconfigs from all the arches that I can and
  made sure that they don't break.

  The reason for these patches is that I recently encountered a circular
  dependency problem that came about when I produced some patches to
  optimise get_order() by rewriting it to use ilog2().  This uses bitops -
  and on the SH arch asm/bitops.h drags in asm-generic/get_order.h by a
  circuitous route involving asm/system.h.

  The main difficulty seems to be asm/system.h.  It holds a number of low
  level bits with no/few dependencies that are commonly used (eg. memory
  barriers) and a number of bits with more dependencies that aren't used
  in many places (eg. switch_to()).

  These patches break asm/system.h up into the following core pieces:

    (1) asm/barrier.h

        Move memory barriers here.  This is already done for MIPS and Alpha.

    (2) asm/switch_to.h

        Move switch_to() and related stuff here.

    (3) asm/exec.h

        Move arch_align_stack() here.  Other process execution related bits
        could perhaps go here from asm/processor.h.

    (4) asm/cmpxchg.h

        Move xchg() and cmpxchg() here as they're full word atomic ops and
        frequently used by atomic_xchg() and atomic_cmpxchg().

    (5) asm/bug.h

        Move die() and related bits.

    (6) asm/auxvec.h

        Move AT_VECTOR_SIZE_ARCH here.

  Other arch headers are created as needed on a per-arch basis."

Fixed up some conflicts from other header file cleanups and moving code
around that has happened in the meantime, so David's testing is somewhat
weakened by that.  We'll find out if anything got broken and fix it.
* tag 'split-asm_system_h-for-linus-20120328' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-asm_system: (38 commits)
  Delete all instances of asm/system.h
  Remove all #inclusions of asm/system.h
  Add #includes needed to permit the removal of asm/system.h
  Move all declarations of free_initmem() to linux/mm.h
  Disintegrate asm/system.h for OpenRISC
  Split arch_align_stack() out from asm-generic/system.h
  Split the switch_to() wrapper out of asm-generic/system.h
  Move the asm-generic/system.h xchg() implementation to asm-generic/cmpxchg.h
  Create asm-generic/barrier.h
  Make asm-generic/cmpxchg.h #include asm-generic/cmpxchg-local.h
  Disintegrate asm/system.h for Xtensa
  Disintegrate asm/system.h for Unicore32 [based on ver #3, changed by gxt]
  Disintegrate asm/system.h for Tile
  Disintegrate asm/system.h for Sparc
  Disintegrate asm/system.h for SH
  Disintegrate asm/system.h for Score
  Disintegrate asm/system.h for S390
  Disintegrate asm/system.h for PowerPC
  Disintegrate asm/system.h for PA-RISC
  Disintegrate asm/system.h for MN10300
  ...
commit 0195c00244
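As a rough illustration of what the split described in the pull message means for a
consumer file, here is a minimal, hypothetical sketch; the file, flag and function are
invented, only the header names come from the message above:

	/* Before the series: one catch-all include dragged in barriers, xchg(),
	 * switch_to() and more, whether or not the file used them. */
	#include <asm/system.h>

	/* After the series: a file pulls in only the pieces it actually uses. */
	#include <asm/barrier.h>	/* mb(), rmb(), wmb(), smp_*mb() */
	#include <asm/cmpxchg.h>	/* xchg(), cmpxchg() and their _local forms */

	static unsigned long ready;

	/* Publish a value, then flip the flag with a full-word atomic exchange. */
	static void publish(unsigned long *slot, unsigned long val)
	{
		*slot = val;
		smp_wmb();		/* order the data store before the flag store */
		xchg(&ready, 1UL);
	}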
@ -13,7 +13,6 @@
|
||||
#include <generated/utsrelease.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/console.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <generated/utsrelease.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/console.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
@ -4,7 +4,6 @@
|
||||
* initial bootloader stuff..
|
||||
*/
|
||||
|
||||
#include <asm/system.h>
|
||||
|
||||
.set noreorder
|
||||
.globl __start
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <generated/utsrelease.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/console.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
@ -3,7 +3,6 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
/*
|
||||
* Atomic operations that C can't guarantee us. Useful for
|
||||
@ -169,6 +168,73 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Atomic exchange routines.
|
||||
*/
|
||||
|
||||
#define __ASM__MB
|
||||
#define ____xchg(type, args...) __xchg ## type ## _local(args)
|
||||
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
|
||||
#include <asm/xchg.h>
|
||||
|
||||
#define xchg_local(ptr,x) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _x_ = (x); \
|
||||
(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#define cmpxchg_local(ptr, o, n) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _o_ = (o); \
|
||||
__typeof__(*(ptr)) _n_ = (n); \
|
||||
(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
|
||||
(unsigned long)_n_, \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#define cmpxchg64_local(ptr, o, n) \
|
||||
({ \
|
||||
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|
||||
cmpxchg_local((ptr), (o), (n)); \
|
||||
})
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
#undef __ASM__MB
|
||||
#define __ASM__MB "\tmb\n"
|
||||
#endif
|
||||
#undef ____xchg
|
||||
#undef ____cmpxchg
|
||||
#define ____xchg(type, args...) __xchg ##type(args)
|
||||
#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
|
||||
#include <asm/xchg.h>
|
||||
|
||||
#define xchg(ptr,x) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _x_ = (x); \
|
||||
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#define cmpxchg(ptr, o, n) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _o_ = (o); \
|
||||
__typeof__(*(ptr)) _n_ = (n); \
|
||||
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
|
||||
(unsigned long)_n_, sizeof(*(ptr)));\
|
||||
})
|
||||
|
||||
#define cmpxchg64(ptr, o, n) \
|
||||
({ \
|
||||
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|
||||
cmpxchg((ptr), (o), (n)); \
|
||||
})
|
||||
|
||||
#undef __ASM__MB
|
||||
#undef ____cmpxchg
|
||||
|
||||
#define __HAVE_ARCH_CMPXCHG 1
|
||||
|
||||
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
|
||||
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
|
||||
|
||||
|
@ -21,4 +21,6 @@
|
||||
#define AT_L2_CACHESHAPE 36
|
||||
#define AT_L3_CACHESHAPE 37
|
||||
|
||||
#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */
|
||||
|
||||
#endif /* __ASM_ALPHA_AUXVEC_H */
|
||||
|
@ -1,8 +1,8 @@
|
||||
#ifndef __ALPHA_LCA__H__
|
||||
#define __ALPHA_LCA__H__
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
/*
|
||||
* Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
|
||||
|
@ -7,6 +7,7 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
/*
|
||||
* MCPCIA is the internal name for a core logic chipset which provides
|
||||
|
@ -7,7 +7,6 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
/*
|
||||
* T2 is the internal name for the core logic chipset which provides
|
||||
|
@ -2,6 +2,7 @@
|
||||
#define __ASM_ALPHA_ELF_H
|
||||
|
||||
#include <asm/auxvec.h>
|
||||
#include <asm/special_insns.h>
|
||||
|
||||
/* Special values for the st_other field in the symbol table. */
|
||||
|
||||
|
6
arch/alpha/include/asm/exec.h
Normal file
6
arch/alpha/include/asm/exec.h
Normal file
@ -0,0 +1,6 @@
|
||||
#ifndef __ALPHA_EXEC_H
|
||||
#define __ALPHA_EXEC_H
|
||||
|
||||
#define arch_align_stack(x) (x)
|
||||
|
||||
#endif /* __ALPHA_EXEC_H */
|
@ -1,6 +1,8 @@
|
||||
#ifndef __ASM_ALPHA_FPU_H
|
||||
#define __ASM_ALPHA_FPU_H
|
||||
|
||||
#include <asm/special_insns.h>
|
||||
|
||||
/*
|
||||
* Alpha floating-point control register defines:
|
||||
*/
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/machvec.h>
|
||||
#include <asm/hwrpb.h>
|
||||
|
@ -1,7 +1,7 @@
|
||||
#ifndef __ALPHA_IRQFLAGS_H
|
||||
#define __ALPHA_IRQFLAGS_H
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/pal.h>
|
||||
|
||||
#define IPL_MIN 0
|
||||
#define IPL_SW0 1
|
||||
|
83
arch/alpha/include/asm/mce.h
Normal file
83
arch/alpha/include/asm/mce.h
Normal file
@ -0,0 +1,83 @@
|
||||
#ifndef __ALPHA_MCE_H
|
||||
#define __ALPHA_MCE_H
|
||||
|
||||
/*
|
||||
* This is the logout header that should be common to all platforms
|
||||
* (assuming they are running OSF/1 PALcode, I guess).
|
||||
*/
|
||||
struct el_common {
|
||||
unsigned int size; /* size in bytes of logout area */
|
||||
unsigned int sbz1 : 30; /* should be zero */
|
||||
unsigned int err2 : 1; /* second error */
|
||||
unsigned int retry : 1; /* retry flag */
|
||||
unsigned int proc_offset; /* processor-specific offset */
|
||||
unsigned int sys_offset; /* system-specific offset */
|
||||
unsigned int code; /* machine check code */
|
||||
unsigned int frame_rev; /* frame revision */
|
||||
};
|
||||
|
||||
/* Machine Check Frame for uncorrectable errors (Large format)
|
||||
* --- This is used to log uncorrectable errors such as
|
||||
* double bit ECC errors.
|
||||
* --- These errors are detected by both processor and systems.
|
||||
*/
|
||||
struct el_common_EV5_uncorrectable_mcheck {
|
||||
unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */
|
||||
unsigned long paltemp[24]; /* PAL TEMP REGS. */
|
||||
unsigned long exc_addr; /* Address of excepting instruction*/
|
||||
unsigned long exc_sum; /* Summary of arithmetic traps. */
|
||||
unsigned long exc_mask; /* Exception mask (from exc_sum). */
|
||||
unsigned long pal_base; /* Base address for PALcode. */
|
||||
unsigned long isr; /* Interrupt Status Reg. */
|
||||
unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */
|
||||
unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity
|
||||
<12> set TAG parity*/
|
||||
unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1:
|
||||
<2> Data error in bank 0
|
||||
<3> Data error in bank 1
|
||||
<4> Tag error in bank 0
|
||||
<5> Tag error in bank 1 */
|
||||
unsigned long va; /* Effective VA of fault or miss. */
|
||||
unsigned long mm_stat; /* Holds the reason for D-stream
|
||||
fault or D-cache parity errors */
|
||||
unsigned long sc_addr; /* Address that was being accessed
|
||||
when EV5 detected Secondary cache
|
||||
failure. */
|
||||
unsigned long sc_stat; /* Helps determine if the error was
|
||||
TAG/Data parity(Secondary Cache)*/
|
||||
unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */
|
||||
unsigned long ei_addr; /* Physical address of any transfer
|
||||
that is logged in EV5 EI_STAT */
|
||||
unsigned long fill_syndrome; /* For correcting ECC errors. */
|
||||
unsigned long ei_stat; /* Helps identify reason of any
|
||||
processor uncorrectable error
|
||||
at its external interface. */
|
||||
unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
|
||||
};
|
||||
|
||||
struct el_common_EV6_mcheck {
|
||||
unsigned int FrameSize; /* Bytes, including this field */
|
||||
unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */
|
||||
unsigned int CpuOffset; /* Offset to CPU-specific info */
|
||||
unsigned int SystemOffset; /* Offset to system-specific info */
|
||||
unsigned int MCHK_Code;
|
||||
unsigned int MCHK_Frame_Rev;
|
||||
unsigned long I_STAT; /* EV6 Internal Processor Registers */
|
||||
unsigned long DC_STAT; /* (See the 21264 Spec) */
|
||||
unsigned long C_ADDR;
|
||||
unsigned long DC1_SYNDROME;
|
||||
unsigned long DC0_SYNDROME;
|
||||
unsigned long C_STAT;
|
||||
unsigned long C_STS;
|
||||
unsigned long MM_STAT;
|
||||
unsigned long EXC_ADDR;
|
||||
unsigned long IER_CM;
|
||||
unsigned long ISUM;
|
||||
unsigned long RESERVED0;
|
||||
unsigned long PAL_BASE;
|
||||
unsigned long I_CTL;
|
||||
unsigned long PCTX;
|
||||
};
|
||||
|
||||
|
||||
#endif /* __ALPHA_MCE_H */
|
@ -7,7 +7,6 @@
|
||||
* Copyright (C) 1996, Linus Torvalds
|
||||
*/
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/machvec.h>
|
||||
#include <asm/compiler.h>
|
||||
#include <asm-generic/mm_hooks.h>
|
||||
|
@ -48,4 +48,116 @@
|
||||
#define PAL_retsys 61
|
||||
#define PAL_rti 63
|
||||
|
||||
#ifdef __KERNEL__
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
extern void halt(void) __attribute__((noreturn));
|
||||
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
|
||||
|
||||
#define imb() \
|
||||
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
|
||||
|
||||
#define draina() \
|
||||
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
|
||||
|
||||
#define __CALL_PAL_R0(NAME, TYPE) \
|
||||
extern inline TYPE NAME(void) \
|
||||
{ \
|
||||
register TYPE __r0 __asm__("$0"); \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %1 # " #NAME \
|
||||
:"=r" (__r0) \
|
||||
:"i" (PAL_ ## NAME) \
|
||||
:"$1", "$16", "$22", "$23", "$24", "$25"); \
|
||||
return __r0; \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_W1(NAME, TYPE0) \
|
||||
extern inline void NAME(TYPE0 arg0) \
|
||||
{ \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %1 # "#NAME \
|
||||
: "=r"(__r16) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
|
||||
extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
{ \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
register TYPE1 __r17 __asm__("$17") = arg1; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %2 # "#NAME \
|
||||
: "=r"(__r16), "=r"(__r17) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
|
||||
extern inline RTYPE NAME(TYPE0 arg0) \
|
||||
{ \
|
||||
register RTYPE __r0 __asm__("$0"); \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %2 # "#NAME \
|
||||
: "=r"(__r16), "=r"(__r0) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
return __r0; \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
|
||||
extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
{ \
|
||||
register RTYPE __r0 __asm__("$0"); \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
register TYPE1 __r17 __asm__("$17") = arg1; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %3 # "#NAME \
|
||||
: "=r"(__r16), "=r"(__r17), "=r"(__r0) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
return __r0; \
|
||||
}
|
||||
|
||||
__CALL_PAL_W1(cflush, unsigned long);
|
||||
__CALL_PAL_R0(rdmces, unsigned long);
|
||||
__CALL_PAL_R0(rdps, unsigned long);
|
||||
__CALL_PAL_R0(rdusp, unsigned long);
|
||||
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
|
||||
__CALL_PAL_R0(whami, unsigned long);
|
||||
__CALL_PAL_W2(wrent, void*, unsigned long);
|
||||
__CALL_PAL_W1(wripir, unsigned long);
|
||||
__CALL_PAL_W1(wrkgp, unsigned long);
|
||||
__CALL_PAL_W1(wrmces, unsigned long);
|
||||
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
|
||||
__CALL_PAL_W1(wrusp, unsigned long);
|
||||
__CALL_PAL_W1(wrvptptr, unsigned long);
|
||||
|
||||
/*
|
||||
* TB routines..
|
||||
*/
|
||||
#define __tbi(nr,arg,arg1...) \
|
||||
({ \
|
||||
register unsigned long __r16 __asm__("$16") = (nr); \
|
||||
register unsigned long __r17 __asm__("$17"); arg; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %3 #__tbi" \
|
||||
:"=r" (__r16),"=r" (__r17) \
|
||||
:"0" (__r16),"i" (PAL_tbi) ,##arg1 \
|
||||
:"$0", "$1", "$22", "$23", "$24", "$25"); \
|
||||
})
|
||||
|
||||
#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17))
|
||||
#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17))
|
||||
#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17))
|
||||
#define tbis(x) __tbi(3,__r17=(x),"1" (__r17))
|
||||
#define tbiap() __tbi(-1, /* no second argument */)
|
||||
#define tbia() __tbi(-2, /* no second argument */)
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* __ALPHA_PAL_H */
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <asm/page.h>
|
||||
#include <asm/processor.h> /* For TASK_SIZE */
|
||||
#include <asm/machvec.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
struct mm_struct;
|
||||
struct vm_area_struct;
|
||||
|
@ -3,4 +3,40 @@
|
||||
|
||||
#define COMMAND_LINE_SIZE 256
|
||||
|
||||
/*
|
||||
* We leave one page for the initial stack page, and one page for
|
||||
* the initial process structure. Also, the console eats 3 MB for
|
||||
* the initial bootloader (one of which we can reclaim later).
|
||||
*/
|
||||
#define BOOT_PCB 0x20000000
|
||||
#define BOOT_ADDR 0x20000000
|
||||
/* Remove when official MILO sources have ELF support: */
|
||||
#define BOOT_SIZE (16*1024)
|
||||
|
||||
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
|
||||
#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
|
||||
#else
|
||||
#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
|
||||
#endif
|
||||
|
||||
#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
|
||||
#define SWAPPER_PGD KERNEL_START
|
||||
#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
|
||||
#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
|
||||
#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
|
||||
#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
|
||||
|
||||
#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
|
||||
|
||||
/*
|
||||
* This is setup by the secondary bootstrap loader. Because
|
||||
* the zero page is zeroed out as soon as the vm system is
|
||||
* initialized, we need to copy things out into a more permanent
|
||||
* place.
|
||||
*/
|
||||
#define PARAM ZERO_PGE
|
||||
#define COMMAND_LINE ((char*)(PARAM + 0x0000))
|
||||
#define INITRD_START (*(unsigned long *) (PARAM+0x100))
|
||||
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
|
||||
|
||||
#endif
|
||||
|
41
arch/alpha/include/asm/special_insns.h
Normal file
41
arch/alpha/include/asm/special_insns.h
Normal file
@ -0,0 +1,41 @@
|
||||
#ifndef __ALPHA_SPECIAL_INSNS_H
|
||||
#define __ALPHA_SPECIAL_INSNS_H
|
||||
|
||||
enum implver_enum {
|
||||
IMPLVER_EV4,
|
||||
IMPLVER_EV5,
|
||||
IMPLVER_EV6
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ALPHA_GENERIC
|
||||
#define implver() \
|
||||
({ unsigned long __implver; \
|
||||
__asm__ ("implver %0" : "=r"(__implver)); \
|
||||
(enum implver_enum) __implver; })
|
||||
#else
|
||||
/* Try to eliminate some dead code. */
|
||||
#ifdef CONFIG_ALPHA_EV4
|
||||
#define implver() IMPLVER_EV4
|
||||
#endif
|
||||
#ifdef CONFIG_ALPHA_EV5
|
||||
#define implver() IMPLVER_EV5
|
||||
#endif
|
||||
#if defined(CONFIG_ALPHA_EV6)
|
||||
#define implver() IMPLVER_EV6
|
||||
#endif
|
||||
#endif
|
||||
|
||||
enum amask_enum {
|
||||
AMASK_BWX = (1UL << 0),
|
||||
AMASK_FIX = (1UL << 1),
|
||||
AMASK_CIX = (1UL << 2),
|
||||
AMASK_MAX = (1UL << 8),
|
||||
AMASK_PRECISE_TRAP = (1UL << 9),
|
||||
};
|
||||
|
||||
#define amask(mask) \
|
||||
({ unsigned long __amask, __input = (mask); \
|
||||
__asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \
|
||||
__amask; })
|
||||
|
||||
#endif /* __ALPHA_SPECIAL_INSNS_H */
|
@ -1,7 +1,6 @@
|
||||
#ifndef _ALPHA_SPINLOCK_H
|
||||
#define _ALPHA_SPINLOCK_H
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/current.h>
|
||||
|
||||
|
14
arch/alpha/include/asm/switch_to.h
Normal file
14
arch/alpha/include/asm/switch_to.h
Normal file
@ -0,0 +1,14 @@
|
||||
#ifndef __ALPHA_SWITCH_TO_H
|
||||
#define __ALPHA_SWITCH_TO_H
|
||||
|
||||
|
||||
struct task_struct;
|
||||
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct *);
|
||||
|
||||
#define switch_to(P,N,L) \
|
||||
do { \
|
||||
(L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
|
||||
check_mmu_context(); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __ALPHA_SWITCH_TO_H */
|
@ -1,354 +0,0 @@
|
||||
#ifndef __ALPHA_SYSTEM_H
|
||||
#define __ALPHA_SYSTEM_H
|
||||
|
||||
#include <asm/pal.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
/*
|
||||
* System defines.. Note that this is included both from .c and .S
|
||||
* files, so it does only defines, not any C code.
|
||||
*/
|
||||
|
||||
/*
|
||||
* We leave one page for the initial stack page, and one page for
|
||||
* the initial process structure. Also, the console eats 3 MB for
|
||||
* the initial bootloader (one of which we can reclaim later).
|
||||
*/
|
||||
#define BOOT_PCB 0x20000000
|
||||
#define BOOT_ADDR 0x20000000
|
||||
/* Remove when official MILO sources have ELF support: */
|
||||
#define BOOT_SIZE (16*1024)
|
||||
|
||||
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
|
||||
#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
|
||||
#else
|
||||
#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
|
||||
#endif
|
||||
|
||||
#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
|
||||
#define SWAPPER_PGD KERNEL_START
|
||||
#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
|
||||
#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
|
||||
#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
|
||||
#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
|
||||
|
||||
#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
|
||||
|
||||
/*
|
||||
* This is setup by the secondary bootstrap loader. Because
|
||||
* the zero page is zeroed out as soon as the vm system is
|
||||
* initialized, we need to copy things out into a more permanent
|
||||
* place.
|
||||
*/
|
||||
#define PARAM ZERO_PGE
|
||||
#define COMMAND_LINE ((char*)(PARAM + 0x0000))
|
||||
#define INITRD_START (*(unsigned long *) (PARAM+0x100))
|
||||
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/kernel.h>
|
||||
#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */
|
||||
|
||||
/*
|
||||
* This is the logout header that should be common to all platforms
|
||||
* (assuming they are running OSF/1 PALcode, I guess).
|
||||
*/
|
||||
struct el_common {
|
||||
unsigned int size; /* size in bytes of logout area */
|
||||
unsigned int sbz1 : 30; /* should be zero */
|
||||
unsigned int err2 : 1; /* second error */
|
||||
unsigned int retry : 1; /* retry flag */
|
||||
unsigned int proc_offset; /* processor-specific offset */
|
||||
unsigned int sys_offset; /* system-specific offset */
|
||||
unsigned int code; /* machine check code */
|
||||
unsigned int frame_rev; /* frame revision */
|
||||
};
|
||||
|
||||
/* Machine Check Frame for uncorrectable errors (Large format)
|
||||
* --- This is used to log uncorrectable errors such as
|
||||
* double bit ECC errors.
|
||||
* --- These errors are detected by both processor and systems.
|
||||
*/
|
||||
struct el_common_EV5_uncorrectable_mcheck {
|
||||
unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */
|
||||
unsigned long paltemp[24]; /* PAL TEMP REGS. */
|
||||
unsigned long exc_addr; /* Address of excepting instruction*/
|
||||
unsigned long exc_sum; /* Summary of arithmetic traps. */
|
||||
unsigned long exc_mask; /* Exception mask (from exc_sum). */
|
||||
unsigned long pal_base; /* Base address for PALcode. */
|
||||
unsigned long isr; /* Interrupt Status Reg. */
|
||||
unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */
|
||||
unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity
|
||||
<12> set TAG parity*/
|
||||
unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1:
|
||||
<2> Data error in bank 0
|
||||
<3> Data error in bank 1
|
||||
<4> Tag error in bank 0
|
||||
<5> Tag error in bank 1 */
|
||||
unsigned long va; /* Effective VA of fault or miss. */
|
||||
unsigned long mm_stat; /* Holds the reason for D-stream
|
||||
fault or D-cache parity errors */
|
||||
unsigned long sc_addr; /* Address that was being accessed
|
||||
when EV5 detected Secondary cache
|
||||
failure. */
|
||||
unsigned long sc_stat; /* Helps determine if the error was
|
||||
TAG/Data parity(Secondary Cache)*/
|
||||
unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */
|
||||
unsigned long ei_addr; /* Physical address of any transfer
|
||||
that is logged in EV5 EI_STAT */
|
||||
unsigned long fill_syndrome; /* For correcting ECC errors. */
|
||||
unsigned long ei_stat; /* Helps identify reason of any
|
||||
processor uncorrectable error
|
||||
at its external interface. */
|
||||
unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
|
||||
};
|
||||
|
||||
struct el_common_EV6_mcheck {
|
||||
unsigned int FrameSize; /* Bytes, including this field */
|
||||
unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */
|
||||
unsigned int CpuOffset; /* Offset to CPU-specific info */
|
||||
unsigned int SystemOffset; /* Offset to system-specific info */
|
||||
unsigned int MCHK_Code;
|
||||
unsigned int MCHK_Frame_Rev;
|
||||
unsigned long I_STAT; /* EV6 Internal Processor Registers */
|
||||
unsigned long DC_STAT; /* (See the 21264 Spec) */
|
||||
unsigned long C_ADDR;
|
||||
unsigned long DC1_SYNDROME;
|
||||
unsigned long DC0_SYNDROME;
|
||||
unsigned long C_STAT;
|
||||
unsigned long C_STS;
|
||||
unsigned long MM_STAT;
|
||||
unsigned long EXC_ADDR;
|
||||
unsigned long IER_CM;
|
||||
unsigned long ISUM;
|
||||
unsigned long RESERVED0;
|
||||
unsigned long PAL_BASE;
|
||||
unsigned long I_CTL;
|
||||
unsigned long PCTX;
|
||||
};
|
||||
|
||||
extern void halt(void) __attribute__((noreturn));
|
||||
#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
|
||||
|
||||
#define switch_to(P,N,L) \
|
||||
do { \
|
||||
(L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
|
||||
check_mmu_context(); \
|
||||
} while (0)
|
||||
|
||||
struct task_struct;
|
||||
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
|
||||
|
||||
#define imb() \
|
||||
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
|
||||
|
||||
#define draina() \
|
||||
__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
|
||||
|
||||
enum implver_enum {
|
||||
IMPLVER_EV4,
|
||||
IMPLVER_EV5,
|
||||
IMPLVER_EV6
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ALPHA_GENERIC
|
||||
#define implver() \
|
||||
({ unsigned long __implver; \
|
||||
__asm__ ("implver %0" : "=r"(__implver)); \
|
||||
(enum implver_enum) __implver; })
|
||||
#else
|
||||
/* Try to eliminate some dead code. */
|
||||
#ifdef CONFIG_ALPHA_EV4
|
||||
#define implver() IMPLVER_EV4
|
||||
#endif
|
||||
#ifdef CONFIG_ALPHA_EV5
|
||||
#define implver() IMPLVER_EV5
|
||||
#endif
|
||||
#if defined(CONFIG_ALPHA_EV6)
|
||||
#define implver() IMPLVER_EV6
|
||||
#endif
|
||||
#endif
|
||||
|
||||
enum amask_enum {
|
||||
AMASK_BWX = (1UL << 0),
|
||||
AMASK_FIX = (1UL << 1),
|
||||
AMASK_CIX = (1UL << 2),
|
||||
AMASK_MAX = (1UL << 8),
|
||||
AMASK_PRECISE_TRAP = (1UL << 9),
|
||||
};
|
||||
|
||||
#define amask(mask) \
|
||||
({ unsigned long __amask, __input = (mask); \
|
||||
__asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \
|
||||
__amask; })
|
||||
|
||||
#define __CALL_PAL_R0(NAME, TYPE) \
|
||||
extern inline TYPE NAME(void) \
|
||||
{ \
|
||||
register TYPE __r0 __asm__("$0"); \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %1 # " #NAME \
|
||||
:"=r" (__r0) \
|
||||
:"i" (PAL_ ## NAME) \
|
||||
:"$1", "$16", "$22", "$23", "$24", "$25"); \
|
||||
return __r0; \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_W1(NAME, TYPE0) \
|
||||
extern inline void NAME(TYPE0 arg0) \
|
||||
{ \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %1 # "#NAME \
|
||||
: "=r"(__r16) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
|
||||
extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
{ \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
register TYPE1 __r17 __asm__("$17") = arg1; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %2 # "#NAME \
|
||||
: "=r"(__r16), "=r"(__r17) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
|
||||
extern inline RTYPE NAME(TYPE0 arg0) \
|
||||
{ \
|
||||
register RTYPE __r0 __asm__("$0"); \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %2 # "#NAME \
|
||||
: "=r"(__r16), "=r"(__r0) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
return __r0; \
|
||||
}
|
||||
|
||||
#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
|
||||
extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
|
||||
{ \
|
||||
register RTYPE __r0 __asm__("$0"); \
|
||||
register TYPE0 __r16 __asm__("$16") = arg0; \
|
||||
register TYPE1 __r17 __asm__("$17") = arg1; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %3 # "#NAME \
|
||||
: "=r"(__r16), "=r"(__r17), "=r"(__r0) \
|
||||
: "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
|
||||
: "$1", "$22", "$23", "$24", "$25"); \
|
||||
return __r0; \
|
||||
}
|
||||
|
||||
__CALL_PAL_W1(cflush, unsigned long);
|
||||
__CALL_PAL_R0(rdmces, unsigned long);
|
||||
__CALL_PAL_R0(rdps, unsigned long);
|
||||
__CALL_PAL_R0(rdusp, unsigned long);
|
||||
__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
|
||||
__CALL_PAL_R0(whami, unsigned long);
|
||||
__CALL_PAL_W2(wrent, void*, unsigned long);
|
||||
__CALL_PAL_W1(wripir, unsigned long);
|
||||
__CALL_PAL_W1(wrkgp, unsigned long);
|
||||
__CALL_PAL_W1(wrmces, unsigned long);
|
||||
__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
|
||||
__CALL_PAL_W1(wrusp, unsigned long);
|
||||
__CALL_PAL_W1(wrvptptr, unsigned long);
|
||||
|
||||
/*
|
||||
* TB routines..
|
||||
*/
|
||||
#define __tbi(nr,arg,arg1...) \
|
||||
({ \
|
||||
register unsigned long __r16 __asm__("$16") = (nr); \
|
||||
register unsigned long __r17 __asm__("$17"); arg; \
|
||||
__asm__ __volatile__( \
|
||||
"call_pal %3 #__tbi" \
|
||||
:"=r" (__r16),"=r" (__r17) \
|
||||
:"0" (__r16),"i" (PAL_tbi) ,##arg1 \
|
||||
:"$0", "$1", "$22", "$23", "$24", "$25"); \
|
||||
})
|
||||
|
||||
#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17))
|
||||
#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17))
|
||||
#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17))
|
||||
#define tbis(x) __tbi(3,__r17=(x),"1" (__r17))
|
||||
#define tbiap() __tbi(-1, /* no second argument */)
|
||||
#define tbia() __tbi(-2, /* no second argument */)
|
||||
|
||||
/*
|
||||
* Atomic exchange routines.
|
||||
*/
|
||||
|
||||
#define __ASM__MB
|
||||
#define ____xchg(type, args...) __xchg ## type ## _local(args)
|
||||
#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
|
||||
#include <asm/xchg.h>
|
||||
|
||||
#define xchg_local(ptr,x) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _x_ = (x); \
|
||||
(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#define cmpxchg_local(ptr, o, n) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _o_ = (o); \
|
||||
__typeof__(*(ptr)) _n_ = (n); \
|
||||
(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
|
||||
(unsigned long)_n_, \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#define cmpxchg64_local(ptr, o, n) \
|
||||
({ \
|
||||
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|
||||
cmpxchg_local((ptr), (o), (n)); \
|
||||
})
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
#undef __ASM__MB
|
||||
#define __ASM__MB "\tmb\n"
|
||||
#endif
|
||||
#undef ____xchg
|
||||
#undef ____cmpxchg
|
||||
#define ____xchg(type, args...) __xchg ##type(args)
|
||||
#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
|
||||
#include <asm/xchg.h>
|
||||
|
||||
#define xchg(ptr,x) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _x_ = (x); \
|
||||
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#define cmpxchg(ptr, o, n) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) _o_ = (o); \
|
||||
__typeof__(*(ptr)) _n_ = (n); \
|
||||
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
|
||||
(unsigned long)_n_, sizeof(*(ptr)));\
|
||||
})
|
||||
|
||||
#define cmpxchg64(ptr, o, n) \
|
||||
({ \
|
||||
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
|
||||
cmpxchg((ptr), (o), (n)); \
|
||||
})
|
||||
|
||||
#undef __ASM__MB
|
||||
#undef ____cmpxchg
|
||||
|
||||
#define __HAVE_ARCH_CMPXCHG 1
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#define arch_align_stack(x) (x)
|
||||
|
||||
#endif
|
@ -1,4 +1,4 @@
|
||||
#ifndef __ALPHA_SYSTEM_H
|
||||
#ifndef _ALPHA_ATOMIC_H
|
||||
#error Do not include xchg.h directly!
|
||||
#else
|
||||
/*
|
||||
|
@ -21,6 +21,7 @@
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
#include "proto.h"
|
||||
#include "pci_impl.h"
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/bootmem.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
#include "proto.h"
|
||||
#include "pci_impl.h"
|
||||
|
@ -21,6 +21,7 @@
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
#include "proto.h"
|
||||
#include "pci_impl.h"
|
||||
|
@ -7,6 +7,8 @@
|
||||
* implementations.
|
||||
*/
|
||||
|
||||
#include <asm/mce.h>
|
||||
|
||||
union el_timestamp;
|
||||
struct el_subpacket;
|
||||
struct ev7_lf_subpackets;
|
||||
|
@ -8,14 +8,12 @@
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/pal.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
__HEAD
|
||||
.globl swapper_pg_dir
|
||||
.globl _stext
|
||||
swapper_pg_dir=SWAPPER_PGD
|
||||
|
||||
.set noreorder
|
||||
.globl __start
|
||||
.ent __start
|
||||
|
@ -26,7 +26,6 @@
|
||||
#include <linux/profile.h>
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <asm/machvec.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/perf_event.h>
|
||||
#include <asm/mce.h>
|
||||
|
||||
#include "proto.h"
|
||||
#include "irq_impl.h"
|
||||
|
@ -40,7 +40,6 @@
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/sysinfo.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/hwrpb.h>
|
||||
|
@ -31,7 +31,6 @@
|
||||
|
||||
#include <asm/reg.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/hwrpb.h>
|
||||
|
@ -16,7 +16,6 @@
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
#include "proto.h"
|
||||
|
@ -55,7 +55,6 @@ static struct notifier_block alpha_panic_block = {
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -21,7 +21,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#define __EXTERN_INLINE inline
|
||||
#include <asm/io.h>
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/reboot.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/mce.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -35,7 +35,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/mce.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -18,7 +18,6 @@
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -20,7 +20,6 @@
|
||||
|
||||
#include <asm/compiler.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
@ -26,6 +25,7 @@
|
||||
#include <asm/core_cia.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/special_insns.h>
|
||||
|
||||
#include "proto.h"
|
||||
#include "irq_impl.h"
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -21,7 +21,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <asm/sysinfo.h>
|
||||
#include <asm/hwrpb.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/special_insns.h>
|
||||
|
||||
#include "proto.h"
|
||||
|
||||
|
@ -2,6 +2,7 @@
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
OUTPUT_FORMAT("elf64-alpha")
|
||||
OUTPUT_ARCH(alpha)
|
||||
@ -25,6 +26,7 @@ SECTIONS
|
||||
*(.fixup)
|
||||
*(.gnu.warning)
|
||||
} :kernel
|
||||
swapper_pg_dir = SWAPPER_PGD;
|
||||
_etext = .; /* End of text section */
|
||||
|
||||
NOTES :kernel :note
|
||||
|
@ -1,5 +1,4 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
typedef unsigned int instr;
|
||||
|
||||
|
@ -24,7 +24,6 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
|
||||
|
@ -22,7 +22,6 @@
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
@ -31,6 +30,7 @@
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/console.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
extern void die_if_kernel(char *,struct pt_regs *,long);
|
||||
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/smp.h>
|
||||
#include <linux/errno.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#include "op_impl.h"
|
||||
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#include "op_impl.h"
|
||||
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#include "op_impl.h"
|
||||
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#include "op_impl.h"
|
||||
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/system.h>
|
||||
|
||||
#include "op_impl.h"
|
||||
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
|
||||
#include <asm/mach/pci.h>
|
||||
|
||||
|
@ -13,7 +13,9 @@
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/types.h>
|
||||
#include <asm/system.h>
|
||||
#include <linux/irqflags.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/cmpxchg.h>
|
||||
|
||||
#define ATOMIC_INIT(i) { (i) }
|
||||
|
||||
|
69
arch/arm/include/asm/barrier.h
Normal file
69
arch/arm/include/asm/barrier.h
Normal file
@ -0,0 +1,69 @@
|
||||
#ifndef __ASM_BARRIER_H
|
||||
#define __ASM_BARRIER_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
|
||||
|
||||
#if __LINUX_ARM_ARCH__ >= 7 || \
|
||||
(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
|
||||
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
|
||||
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
|
||||
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
|
||||
#endif
|
||||
|
||||
#if __LINUX_ARM_ARCH__ >= 7
|
||||
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
|
||||
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
|
||||
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
|
||||
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
|
||||
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
|
||||
: : "r" (0) : "memory")
|
||||
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
|
||||
: : "r" (0) : "memory")
|
||||
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
|
||||
: : "r" (0) : "memory")
|
||||
#elif defined(CONFIG_CPU_FA526)
|
||||
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
|
||||
: : "r" (0) : "memory")
|
||||
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
|
||||
: : "r" (0) : "memory")
|
||||
#define dmb() __asm__ __volatile__ ("" : : : "memory")
|
||||
#else
|
||||
#define isb() __asm__ __volatile__ ("" : : : "memory")
|
||||
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
|
||||
: : "r" (0) : "memory")
|
||||
#define dmb() __asm__ __volatile__ ("" : : : "memory")
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_BARRIERS
|
||||
#include <mach/barriers.h>
|
||||
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
|
||||
#include <asm/outercache.h>
|
||||
#define mb() do { dsb(); outer_sync(); } while (0)
|
||||
#define rmb() dsb()
|
||||
#define wmb() mb()
|
||||
#else
|
||||
#include <asm/memory.h>
|
||||
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
|
||||
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
|
||||
#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
#define smp_mb() barrier()
|
||||
#define smp_rmb() barrier()
|
||||
#define smp_wmb() barrier()
|
||||
#else
|
||||
#define smp_mb() dmb()
|
||||
#define smp_rmb() dmb()
|
||||
#define smp_wmb() dmb()
|
||||
#endif
|
||||
|
||||
#define read_barrier_depends() do { } while(0)
|
||||
#define smp_read_barrier_depends() do { } while(0)
|
||||
|
||||
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __ASM_BARRIER_H */
|
@ -24,7 +24,7 @@
|
||||
#endif
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/system.h>
|
||||
#include <linux/irqflags.h>
|
||||
|
||||
#define smp_mb__before_clear_bit() smp_mb()
|
||||
#define smp_mb__after_clear_bit() smp_mb()
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _ASMARM_BUG_H
|
||||
#define _ASMARM_BUG_H
|
||||
|
||||
#include <linux/linkage.h>
|
||||
|
||||
#ifdef CONFIG_BUG
|
||||
|
||||
@ -57,4 +58,33 @@ do { \
|
||||
|
||||
#include <asm-generic/bug.h>
|
||||
|
||||
struct pt_regs;
|
||||
void die(const char *msg, struct pt_regs *regs, int err);
|
||||
|
||||
struct siginfo;
|
||||
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
|
||||
unsigned long err, unsigned long trap);
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
#define FAULT_CODE_ALIGNMENT 33
|
||||
#define FAULT_CODE_DEBUG 34
|
||||
#else
|
||||
#define FAULT_CODE_ALIGNMENT 1
|
||||
#define FAULT_CODE_DEBUG 2
|
||||
#endif
|
||||
|
||||
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
|
||||
struct pt_regs *),
|
||||
int sig, int code, const char *name);
|
||||
|
||||
void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
|
||||
struct pt_regs *),
|
||||
int sig, int code, const char *name);
|
||||
|
||||
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
|
||||
|
||||
struct mm_struct;
|
||||
extern void show_pte(struct mm_struct *mm, unsigned long addr);
|
||||
extern void __show_regs(struct pt_regs *);
|
||||
|
||||
#endif
|
||||
|
295
arch/arm/include/asm/cmpxchg.h
Normal file
295
arch/arm/include/asm/cmpxchg.h
Normal file
@ -0,0 +1,295 @@
|
||||
#ifndef __ASM_ARM_CMPXCHG_H
|
||||
#define __ASM_ARM_CMPXCHG_H
|
||||
|
||||
#include <linux/irqflags.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
|
||||
/*
|
||||
* On the StrongARM, "swp" is terminally broken since it bypasses the
|
||||
* cache totally. This means that the cache becomes inconsistent, and,
|
||||
* since we use normal loads/stores as well, this is really bad.
|
||||
* Typically, this causes oopsen in filp_close, but could have other,
|
||||
* more disastrous effects. There are two work-arounds:
|
||||
* 1. Disable interrupts and emulate the atomic swap
|
||||
* 2. Clean the cache, perform atomic swap, flush the cache
|
||||
*
|
||||
* We choose (1) since its the "easiest" to achieve here and is not
|
||||
* dependent on the processor type.
|
||||
*
|
||||
* NOTE that this solution won't work on an SMP system, so explcitly
|
||||
* forbid it here.
|
||||
*/
|
||||
#define swp_is_buggy
|
||||
#endif
|
||||
|
||||
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
|
||||
{
|
||||
extern void __bad_xchg(volatile void *, int);
|
||||
unsigned long ret;
|
||||
#ifdef swp_is_buggy
|
||||
unsigned long flags;
|
||||
#endif
|
||||
#if __LINUX_ARM_ARCH__ >= 6
|
||||
unsigned int tmp;
|
||||
#endif
|
||||
|
||||
smp_mb();
|
||||
|
||||
switch (size) {
|
||||
#if __LINUX_ARM_ARCH__ >= 6
|
||||
case 1:
|
||||
asm volatile("@ __xchg1\n"
|
||||
"1: ldrexb %0, [%3]\n"
|
||||
" strexb %1, %2, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
" bne 1b"
|
||||
: "=&r" (ret), "=&r" (tmp)
|
||||
: "r" (x), "r" (ptr)
|
||||
: "memory", "cc");
|
||||
break;
|
||||
case 4:
|
||||
asm volatile("@ __xchg4\n"
|
||||
"1: ldrex %0, [%3]\n"
|
||||
" strex %1, %2, [%3]\n"
|
||||
" teq %1, #0\n"
|
||||
" bne 1b"
|
||||
: "=&r" (ret), "=&r" (tmp)
|
||||
: "r" (x), "r" (ptr)
|
||||
: "memory", "cc");
|
||||
break;
|
||||
#elif defined(swp_is_buggy)
|
||||
#ifdef CONFIG_SMP
|
||||
#error SMP is not supported on this platform
|
||||
#endif
|
||||
case 1:
|
||||
raw_local_irq_save(flags);
|
||||
ret = *(volatile unsigned char *)ptr;
|
||||
*(volatile unsigned char *)ptr = x;
|
||||
raw_local_irq_restore(flags);
|
||||
break;
|
||||
|
||||
case 4:
|
||||
raw_local_irq_save(flags);
|
||||
ret = *(volatile unsigned long *)ptr;
|
||||
*(volatile unsigned long *)ptr = x;
|
||||
raw_local_irq_restore(flags);
|
||||
break;
|
||||
#else
|
||||
case 1:
|
||||
asm volatile("@ __xchg1\n"
|
||||
" swpb %0, %1, [%2]"
|
||||
: "=&r" (ret)
|
||||
: "r" (x), "r" (ptr)
|
||||
: "memory", "cc");
|
||||
break;
|
||||
case 4:
|
||||
asm volatile("@ __xchg4\n"
|
||||
" swp %0, %1, [%2]"
|
||||
: "=&r" (ret)
|
||||
: "r" (x), "r" (ptr)
|
||||
: "memory", "cc");
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
__bad_xchg(ptr, size), ret = 0;
|
||||
break;
|
||||
}
|
||||
smp_mb();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define xchg(ptr,x) \
|
||||
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
|
||||
|
||||
#include <asm-generic/cmpxchg-local.h>
|
||||
|
||||
#if __LINUX_ARM_ARCH__ < 6
|
||||
/* min ARCH < ARMv6 */
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
#error "SMP is not supported on this platform"
|
||||
#endif
|
||||
|
||||
/*
|
||||
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
|
||||
* them available.
|
||||
*/
|
||||
#define cmpxchg_local(ptr, o, n) \
|
||||
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
|
||||
(unsigned long)(n), sizeof(*(ptr))))
|
||||
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
|
||||
|
||||
#ifndef CONFIG_SMP
|
||||
#include <asm-generic/cmpxchg.h>
|
||||
#endif
|
||||
|
||||
#else /* min ARCH >= ARMv6 */
|
||||
|
||||
extern void __bad_cmpxchg(volatile void *ptr, int size);
|
||||
|
||||
/*
|
||||
* cmpxchg only support 32-bits operands on ARMv6.
|
||||
*/
|
||||
|
||||
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long oldval, res;
|
||||
|
||||
switch (size) {
|
||||
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
|
||||
case 1:
|
||||
do {
|
||||
asm volatile("@ __cmpxchg1\n"
|
||||
" ldrexb %1, [%2]\n"
|
||||
" mov %0, #0\n"
|
||||
" teq %1, %3\n"
|
||||
" strexbeq %0, %4, [%2]\n"
|
||||
: "=&r" (res), "=&r" (oldval)
|
||||
: "r" (ptr), "Ir" (old), "r" (new)
|
||||
: "memory", "cc");
|
||||
} while (res);
|
||||
break;
|
||||
case 2:
|
||||
do {
|
||||
asm volatile("@ __cmpxchg1\n"
|
||||
" ldrexh %1, [%2]\n"
|
||||
" mov %0, #0\n"
|
||||
" teq %1, %3\n"
|
||||
" strexheq %0, %4, [%2]\n"
|
||||
: "=&r" (res), "=&r" (oldval)
|
||||
: "r" (ptr), "Ir" (old), "r" (new)
|
||||
: "memory", "cc");
|
||||
} while (res);
|
||||
break;
|
||||
#endif
|
||||
case 4:
|
||||
do {
|
||||
asm volatile("@ __cmpxchg4\n"
|
||||
" ldrex %1, [%2]\n"
|
||||
" mov %0, #0\n"
|
||||
" teq %1, %3\n"
|
||||
" strexeq %0, %4, [%2]\n"
|
||||
: "=&r" (res), "=&r" (oldval)
|
||||
: "r" (ptr), "Ir" (old), "r" (new)
|
||||
: "memory", "cc");
|
||||
} while (res);
|
||||
break;
|
||||
default:
|
||||
__bad_cmpxchg(ptr, size);
|
||||
oldval = 0;
|
||||
}
|
||||
|
||||
return oldval;
|
||||
}
|
||||
|
||||
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
smp_mb();
|
||||
ret = __cmpxchg(ptr, old, new, size);
|
||||
smp_mb();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define cmpxchg(ptr,o,n) \
|
||||
((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
|
||||
(unsigned long)(o), \
|
||||
(unsigned long)(n), \
|
||||
sizeof(*(ptr))))
|
||||
|
||||
static inline unsigned long __cmpxchg_local(volatile void *ptr,
|
||||
unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
switch (size) {
|
||||
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
|
||||
case 1:
|
||||
case 2:
|
||||
ret = __cmpxchg_local_generic(ptr, old, new, size);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
ret = __cmpxchg(ptr, old, new, size);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define cmpxchg_local(ptr,o,n) \
|
||||
((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
|
||||
(unsigned long)(o), \
|
||||
(unsigned long)(n), \
|
||||
sizeof(*(ptr))))
|
||||
|
||||
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
|
||||
|
||||
/*
|
||||
* Note : ARMv7-M (currently unsupported by Linux) does not support
|
||||
* ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
|
||||
* not be allowed to use __cmpxchg64.
|
||||
*/
|
||||
static inline unsigned long long __cmpxchg64(volatile void *ptr,
|
||||
unsigned long long old,
|
||||
unsigned long long new)
|
||||
{
|
||||
register unsigned long long oldval asm("r0");
|
||||
register unsigned long long __old asm("r2") = old;
|
||||
register unsigned long long __new asm("r4") = new;
|
||||
unsigned long res;
|
||||
|
||||
do {
|
||||
asm volatile(
|
||||
" @ __cmpxchg8\n"
|
||||
" ldrexd %1, %H1, [%2]\n"
|
||||
" mov %0, #0\n"
|
||||
" teq %1, %3\n"
|
||||
" teqeq %H1, %H3\n"
|
||||
" strexdeq %0, %4, %H4, [%2]\n"
|
||||
: "=&r" (res), "=&r" (oldval)
|
||||
: "r" (ptr), "Ir" (__old), "r" (__new)
|
||||
: "memory", "cc");
|
||||
} while (res);
|
||||
|
||||
return oldval;
|
||||
}
|
||||
|
||||
static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
|
||||
unsigned long long old,
|
||||
unsigned long long new)
|
||||
{
|
||||
unsigned long long ret;
|
||||
|
||||
smp_mb();
|
||||
ret = __cmpxchg64(ptr, old, new);
|
||||
smp_mb();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define cmpxchg64(ptr,o,n) \
|
||||
((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
|
||||
(unsigned long long)(o), \
|
||||
(unsigned long long)(n)))
|
||||
|
||||
#define cmpxchg64_local(ptr,o,n) \
|
||||
((__typeof__(*(ptr)))__cmpxchg64((ptr), \
|
||||
(unsigned long long)(o), \
|
||||
(unsigned long long)(n)))
|
||||
|
||||
#else /* min ARCH = ARMv6 */
|
||||
|
||||
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __LINUX_ARM_ARCH__ >= 6 */
|
||||
|
||||
#endif /* __ASM_ARM_CMPXCHG_H */
|
arch/arm/include/asm/compiler.h (new file, 15 lines)
@@ -0,0 +1,15 @@
#ifndef __ASM_ARM_COMPILER_H
#define __ASM_ARM_COMPILER_H

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"


#endif /* __ASM_ARM_COMPILER_H */

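A short sketch (not from the patch) of how __asmeq() is meant to be used: it is pasted in front of an inline asm template so assembly fails if the compiler did not place the operand in the expected register. The function name below is hypothetical.

static inline void example_expect_r0(unsigned long val)
{
	register unsigned long r0 asm("r0") = val;

	asm volatile(
		__asmeq("%0", "r0")	/* expands to .ifnc/.err; a no-op when %0 really is r0 */
		"mov r0, r0"		/* placeholder instruction consuming the operand */
		: : "r" (r0));
}
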
arch/arm/include/asm/cp15.h (new file, 87 lines)
@@ -0,0 +1,87 @@
#ifndef __ASM_ARM_CP15_H
#define __ASM_ARM_CP15_H

#include <asm/barrier.h>

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
#define CR_TRE	(1 << 28)	/* TEX remap enable */
#define CR_AFE	(1 << 29)	/* Access flag enable */
#define CR_TE	(1 << 30)	/* Thumb exception enable */

#ifndef __ASSEMBLY__

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}

#endif

#endif

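As a quick illustration of the accessors this new header carries, here is a sketch (not part of the patch) that sets one of the CR bits defined above; the wrapper name is invented for the example.

static inline void example_enable_alignment_faults(void)
{
	unsigned int cr = get_cr();

	/* set_cr() already issues the isb() after writing the register */
	set_cr(cr | CR_A);
}
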
@@ -1,8 +1,8 @@
#ifndef __ASM_ARM_DIV64
#define __ASM_ARM_DIV64

#include <asm/system.h>
#include <linux/types.h>
#include <asm/compiler.h>

/*
 * The semantics of do_div() are:
@@ -19,7 +19,6 @@
 * It should not be re-used except for that purpose.
 */
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/scatterlist.h>

#include <mach/isa-dma.h>

@@ -10,6 +10,10 @@
#ifndef __ASM_PROC_DOMAIN_H
#define __ASM_PROC_DOMAIN_H

#ifndef __ASSEMBLY__
#include <asm/barrier.h>
#endif

/*
 * Domain numbers
 *
arch/arm/include/asm/exec.h (new file, 6 lines)
@@ -0,0 +1,6 @@
#ifndef __ASM_ARM_EXEC_H
#define __ASM_ARM_EXEC_H

#define arch_align_stack(x) (x)

#endif /* __ASM_ARM_EXEC_H */

@@ -231,6 +231,9 @@ extern int iop3xx_get_init_atu(void);


#ifndef __ASSEMBLY__

#include <linux/types.h>

void iop3xx_map_io(void);
void iop_init_cp6_handler(void);
void iop_init_time(unsigned long tickrate);
@@ -26,7 +26,6 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm/system.h>
#include <asm-generic/pci_iomap.h>

/*
@@ -99,6 +98,7 @@ static inline void __iomem *__typesafe_io(unsigned long addr)

/* IO barriers */
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#include <asm/barrier.h>
#define __iormb()	rmb()
#define __iowmb()	wmb()
#else
@@ -34,4 +34,11 @@ typedef struct {

#endif

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

#endif

@@ -22,7 +22,6 @@
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/system.h>

#ifdef __KERNEL__
#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -90,6 +89,8 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax()	barrier()
#endif

void cpu_idle_wait(void);

/*
 * Create a new kernel thread
 */
arch/arm/include/asm/switch_to.h (new file, 18 lines)
@@ -0,0 +1,18 @@
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#endif /* __ASM_ARM_SWITCH_TO_H */

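The macro above is only meant to be expanded from the scheduler's context-switch path; the sketch below is illustrative only (it is not the real kernel/sched code) and just shows the calling convention.

static inline void example_context_switch(struct task_struct *prev,
					  struct task_struct *next)
{
	struct task_struct *last;

	/* 'last' receives the task we actually switched away from by the
	 * time 'prev' is scheduled back in. */
	switch_to(prev, next, last);
	(void)last;
}
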
@@ -1,544 +1,8 @@
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
#define CR_TRE	(1 << 28)	/* TEX remap enable */
#define CR_AFE	(1 << 29)	/* Access flag enable */
#define CR_TE	(1 << 30)	/* Thumb exception enable */

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>

#include <asm/outercache.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err);

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

#ifdef CONFIG_ARM_LPAE
#define FAULT_CODE_ALIGNMENT	33
#define FAULT_CODE_DEBUG	34
#else
#define FAULT_CODE_ALIGNMENT	1
#define FAULT_CODE_DEBUG	2
#endif

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int __pure cpu_architecture(void);
extern void cpu_init(void);

void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);
extern void (*arm_pm_idle)(void);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7 || \
	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb()	do { dsb(); outer_sync(); } while (0)
#define rmb()	dsb()
#define wmb()	mb()
#else
#include <asm/memory.h>
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#endif

#ifndef CONFIG_SMP
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif

#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since its the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explcitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1: ldrexb %0, [%3]\n"
		" strexb %1, %2, [%3]\n"
		" teq %1, #0\n"
		" bne 1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1: ldrex %0, [%3]\n"
		" strex %1, %2, [%3]\n"
		" teq %1, #0\n"
		" bne 1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		" swpb %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		" swp %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

extern void disable_hlt(void);
extern void enable_hlt(void);

void cpu_idle_wait(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only support 32-bits operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			" ldrexb %1, [%2]\n"
			" mov %0, #0\n"
			" teq %1, %3\n"
			" strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg1\n"
			" ldrexh %1, [%2]\n"
			" mov %0, #0\n"
			" teq %1, %3\n"
			" strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			" ldrex %1, [%2]\n"
			" mov %0, #0\n"
			" teq %1, %3\n"
			" strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr), \
					  (unsigned long)(o), \
					  (unsigned long)(n), \
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
					     (unsigned long)(o), \
					     (unsigned long)(n), \
					     sizeof(*(ptr))))

#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		" @ __cmpxchg8\n"
		" ldrexd %1, %H1, [%2]\n"
		" mov %0, #0\n"
		" teq %1, %3\n"
		" teqeq %H1, %H3\n"
		" strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
					    (unsigned long long)(o), \
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg64((ptr), \
					 (unsigned long long)(o), \
					 (unsigned long long)(n)))

#else /* min ARCH = ARMv6 */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif
/* FILE TO BE DELETED. DO NOT ADD STUFF HERE! */
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/exec.h>
#include <asm/switch_to.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>

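The old header above also carried the xchg() macro; as a reminder of its semantics, here is a minimal test-and-set sketch (example only; the lock variable and helper names are hypothetical and not part of the patch).

static volatile unsigned int example_lock;	/* 0 = free, 1 = held */

static inline void example_lock_acquire(void)
{
	/* xchg() returns the previous value, so keep trying until we see 0 */
	while (xchg(&example_lock, 1) != 0)
		;
}

static inline void example_lock_release(void)
{
	xchg(&example_lock, 0);
}
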
arch/arm/include/asm/system_info.h (new file, 27 lines)
@@ -0,0 +1,27 @@
#ifndef __ASM_ARM_SYSTEM_INFO_H
#define __ASM_ARM_SYSTEM_INFO_H

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

#ifndef __ASSEMBLY__

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

extern int __pure cpu_architecture(void);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_ARM_SYSTEM_INFO_H */

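A small sketch (not from the patch) of how the declarations above are typically consumed: compare the runtime result of cpu_architecture() against the CPU_ARCH_* constants. The helper name is invented for the example.

static inline int example_is_v6_or_later(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6;
}
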
arch/arm/include/asm/system_misc.h (new file, 29 lines)
@@ -0,0 +1,29 @@
#ifndef __ASM_ARM_SYSTEM_MISC_H
#define __ASM_ARM_SYSTEM_MISC_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>

extern void cpu_init(void);

void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);
extern void (*arm_pm_idle)(void);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_ARM_SYSTEM_MISC_H */

@@ -16,8 +16,8 @@
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/system.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -18,7 +18,6 @@
#include <linux/io.h>

#include <asm/checksum.h>
#include <asm/system.h>
#include <asm/ftrace.h>

/*
@@ -3,6 +3,7 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <asm/system_info.h>

int elf_check_arch(const struct elf32_hdr *x)
{
@@ -26,7 +26,7 @@
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
@@ -42,9 +42,9 @@
#include <linux/seq_file.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/traps.h>

static unsigned long no_fiq_insn;
@@ -17,8 +17,8 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/thread_info.h>
#include <asm/system.h>

/*
 * Kernel startup entry point.
@@ -15,12 +15,12 @@
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#ifdef CONFIG_DEBUG_LL
@@ -34,7 +34,6 @@
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
@@ -36,7 +36,6 @@
#include <linux/proc_fs.h>

#include <asm/exception.h>
#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
@@ -13,6 +13,7 @@

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <asm/system_info.h>

#include "kprobes.h"

@@ -12,7 +12,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/system.h>
#include <asm/system_misc.h>

extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
@@ -35,7 +35,6 @@
#include <asm/cacheflush.h>
#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>
@@ -26,7 +26,6 @@
#include <linux/audit.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>

#define REG_PC	15
@@ -33,6 +33,7 @@
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
@@ -44,12 +45,13 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/system.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
@@ -4,7 +4,6 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
	.text

/*
Some files were not shown because too many files have changed in this diff.