Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Clocksource: Only install r4k counter as clocksource if present.
  [MIPS] Lasat: fix LASAT_CASCADE_IRQ
  [MIPS] Delete leftovers of old pcspeaker support.
  [MIPS] BCM1480: Init pci controller io_map_base
  [MIPS] Yosemite: Fix a few more section reference bugs.
  [MIPS] Fix yosemite build error
  [MIPS] Fix loads of section missmatches
  [MIPS] IP27: Tighten up CPU description to fix warnings.
  [MIPS] Fix plat_ioremap for JMR3927
  [MIPS] Export __ucmpdi2 to modules.
  [MIPS] Fix typo in comment
  [MIPS] Use KBUILD_DEFCONFIG
  [MIPS] Allow 48Hz to be selected if CONFIG_SYS_SUPPORTS_ARBIT_HZ is set.
  [MIPS] Added missing cases for rdhwr emulation
  [MIPS] Alchemy: Fix ids in Alchemy db dma device table
commit 299601cfc0
@@ -1824,7 +1824,7 @@ choice
  Allows the configuration of the timer frequency.

config HZ_48
- bool "48 HZ" if SYS_SUPPORTS_48HZ
+ bool "48 HZ" if SYS_SUPPORTS_48HZ || SYS_SUPPORTS_ARBIT_HZ

config HZ_100
bool "100 HZ" if SYS_SUPPORTS_100HZ || SYS_SUPPORTS_ARBIT_HZ
@@ -12,6 +12,8 @@
# for "archclean" cleaning up for this architecture.
#

+ KBUILD_DEFCONFIG := ip22_defconfig
+
cflags-y :=

#
@@ -161,22 +161,22 @@ static dbdev_tab_t dbdev_tab[] = {
{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

/* Provide 16 user definable device types */
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
- { 0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
+ { ~0, 0, 0, 0, 0, 0, 0 },
};

#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
@@ -209,7 +209,7 @@ au1xxx_ddma_add_device(dbdev_tab_t *dev)
dbdev_tab_t *p=NULL;
static u16 new_id=0x1000;

- p = find_dbdev_id(0);
+ p = find_dbdev_id(~0);
if ( NULL != p )
{
memcpy(p, dev, sizeof(dbdev_tab_t));
arch/mips/defconfig: 1158 lines changed (file diff suppressed because it is too large)
@@ -76,7 +76,6 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o

obj-$(CONFIG_I8253) += i8253.o
- obj-$(CONFIG_PCSPEAKER) += pcspeaker.o

obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
@@ -167,7 +167,7 @@ static inline void check_mult_sh(void)
panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}

- static volatile int daddi_ov __initdata = 0;
+ static volatile int daddi_ov __cpuinitdata = 0;

asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

- int daddiu_bug __initdata = -1;
+ int daddiu_bug __cpuinitdata = -1;

static inline void check_daddiu(void)
{
@@ -550,7 +550,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
}
}

- static char unknown_isa[] __initdata = KERN_ERR \
+ static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";

static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -656,7 +656,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
return config3 & MIPS_CONF_M;
}

- static void __init decode_configs(struct cpuinfo_mips *c)
+ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
{
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
@@ -814,7 +814,7 @@ const char *__cpu_name[NR_CPUS];
/*
* Name a CPU
*/
- static __init const char *cpu_to_name(struct cpuinfo_mips *c)
+ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
{
const char *name = NULL;

@@ -896,7 +896,7 @@ static __init const char *cpu_to_name(struct cpuinfo_mips *c)
return name;
}

- __init void cpu_probe(void)
+ __cpuinit void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
@@ -959,7 +959,7 @@ __init void cpu_probe(void)
c->srsets = 1;
}

- __init void cpu_report(void)
+ __cpuinit void cpu_report(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -22,12 +22,17 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

- void __init init_mips_clocksource(void)
+ int __init init_mips_clocksource(void)
{
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
/* Calclate a somewhat reasonable rating value */
clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;

clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);

clocksource_register(&clocksource_mips);
+
+ return 0;
}
@@ -195,7 +195,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
j start_kernel
END(kernel_entry)

- __INIT
+ __CPUINIT

#ifdef CONFIG_SMP
/*
@@ -534,8 +534,7 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)

/*
* Simulate trapping 'rdhwr' instructions to provide user accessible
- * registers not implemented in hardware. The only current use of this
- * is the thread area pointer.
+ * registers not implemented in hardware.
*/
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
@@ -545,11 +544,31 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
switch (rd) {
- case 29:
- regs->regs[rt] = ti->tp_value;
- return 0;
+ case 0: /* CPU number */
+ regs->regs[rt] = smp_processor_id();
+ return 0;
+ case 1: /* SYNCI length */
+ regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ return 0;
+ case 2: /* Read count register */
+ regs->regs[rt] = read_c0_count();
+ return 0;
+ case 3: /* Count register resolution */
+ switch (current_cpu_data.cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ regs->regs[rt] = 1;
+ break;
default:
- return -1;
+ regs->regs[rt] = 2;
+ }
+ return 0;
+ case 29:
+ regs->regs[rt] = ti->tp_value;
+ return 0;
+ default:
+ return -1;
}
}
@@ -1287,7 +1306,7 @@ int cp0_compare_irq;
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

- void __init per_cpu_trap_init(void)
+ void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
@@ -1404,11 +1423,12 @@ void __init set_handler(unsigned long offset, void *addr, unsigned long size)
flush_icache_range(ebase + offset, ebase + offset + size);
}

- static char panic_null_cerr[] __initdata =
+ static char panic_null_cerr[] __cpuinitdata =
"Trying to set NULL cache error exception handler";

/* Install uncached CPU exception handler */
- void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
+ void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
+ unsigned long size)
{
#ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase);
@@ -17,3 +17,5 @@ word_type __ucmpdi2(unsigned long long a, unsigned long long b)
return 2;
return 1;
}
+
+ EXPORT_SYMBOL(__ucmpdi2);
@@ -36,7 +36,7 @@
* values, so we can avoid sharing the same stack area between a cached
* and the uncached mode.
*/
- unsigned long __init run_uncached(void *func)
+ unsigned long __cpuinit run_uncached(void *func)
{
register long sp __asm__("$sp");
register long ret __asm__("$2");
@@ -146,7 +146,7 @@ void __init plat_perf_setup(void)
}
}

- unsigned int __init get_c0_compare_int(void)
+ unsigned int __cpuinit get_c0_compare_int(void)
{
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
@@ -83,7 +83,7 @@ static void mips_timer_dispatch(void)
}


- unsigned __init get_c0_compare_int(void)
+ unsigned __cpuinit get_c0_compare_int(void)
{
#ifdef MSC01E_INT_BASE
if (cpu_has_veic) {
@@ -307,7 +307,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
r3k_flush_dcache_range(start, start + size);
}

- void __init r3k_cache_init(void)
+ void __cpuinit r3k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
@@ -93,7 +93,7 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
blast_dcache32_page(addr);
}

- static void __init r4k_blast_dcache_page_setup(void)
+ static void __cpuinit r4k_blast_dcache_page_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();

@@ -107,7 +107,7 @@ static void __init r4k_blast_dcache_page_setup(void)

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

- static void __init r4k_blast_dcache_page_indexed_setup(void)
+ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();

@@ -121,7 +121,7 @@ static void __init r4k_blast_dcache_page_indexed_setup(void)

static void (* r4k_blast_dcache)(void);

- static void __init r4k_blast_dcache_setup(void)
+ static void __cpuinit r4k_blast_dcache_setup(void)
{
unsigned long dc_lsize = cpu_dcache_line_size();

@@ -206,7 +206,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)

static void (* r4k_blast_icache_page)(unsigned long addr);

- static void __init r4k_blast_icache_page_setup(void)
+ static void __cpuinit r4k_blast_icache_page_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();

@@ -223,7 +223,7 @@ static void __init r4k_blast_icache_page_setup(void)

static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

- static void __init r4k_blast_icache_page_indexed_setup(void)
+ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();

@@ -247,7 +247,7 @@ static void __init r4k_blast_icache_page_indexed_setup(void)

static void (* r4k_blast_icache)(void);

- static void __init r4k_blast_icache_setup(void)
+ static void __cpuinit r4k_blast_icache_setup(void)
{
unsigned long ic_lsize = cpu_icache_line_size();

@@ -268,7 +268,7 @@ static void __init r4k_blast_icache_setup(void)

static void (* r4k_blast_scache_page)(unsigned long addr);

- static void __init r4k_blast_scache_page_setup(void)
+ static void __cpuinit r4k_blast_scache_page_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();

@@ -286,7 +286,7 @@ static void __init r4k_blast_scache_page_setup(void)

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

- static void __init r4k_blast_scache_page_indexed_setup(void)
+ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();

@@ -304,7 +304,7 @@ static void __init r4k_blast_scache_page_indexed_setup(void)

static void (* r4k_blast_scache)(void);

- static void __init r4k_blast_scache_setup(void)
+ static void __cpuinit r4k_blast_scache_setup(void)
{
unsigned long sc_lsize = cpu_scache_line_size();

@@ -691,11 +691,11 @@ static inline void rm7k_erratum31(void)
}
}

- static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
+ static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

- static void __init probe_pcache(void)
+ static void __cpuinit probe_pcache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1016,7 +1016,7 @@ static void __init probe_pcache(void)
* executes in KSEG1 space or else you will crash and burn badly. You have
* been warned.
*/
- static int __init probe_scache(void)
+ static int __cpuinit probe_scache(void)
{
unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config();
@@ -1095,7 +1095,7 @@ extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

- static void __init setup_scache(void)
+ static void __cpuinit setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -1206,7 +1206,7 @@ void au1x00_fixup_config_od(void)
}
}

- static void __init coherency_setup(void)
+ static void __cpuinit coherency_setup(void)
{
change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

@@ -1238,7 +1238,7 @@ static void __init coherency_setup(void)
}
}

- void __init r4k_cache_init(void)
+ void __cpuinit r4k_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
@@ -329,7 +329,7 @@ static __init void tx39_probe_cache(void)
}
}

- void __init tx39_cache_init(void)
+ void __cpuinit tx39_cache_init(void)
{
extern void build_clear_page(void);
extern void build_copy_page(void);
@@ -127,9 +127,10 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
}
}

- static char cache_panic[] __initdata = "Yeee, unsupported cache architecture.";
+ static char cache_panic[] __cpuinitdata =
+ "Yeee, unsupported cache architecture.";

- void __init cpu_cache_init(void)
+ void __devinit cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);
@@ -34,8 +34,6 @@
* is changed.
*/

- __INIT
-
.set mips64
.set noreorder
.set noat
@@ -51,6 +49,8 @@
* (0x170-0x17f) are used to preserve k0, k1, and ra.
*/

+ __CPUINIT
+
LEAF(except_vec2_sb1)
/*
* If this error is recoverable, we need to exit the handler
@@ -66,21 +66,21 @@ EXPORT_SYMBOL(copy_page);
* with 64-bit kernels. The prefetch offsets have been experimentally tuned
* an Origin 200.
*/
- static int pref_offset_clear __initdata = 512;
- static int pref_offset_copy __initdata = 256;
+ static int pref_offset_clear __cpuinitdata = 512;
+ static int pref_offset_copy __cpuinitdata = 256;

- static unsigned int pref_src_mode __initdata;
- static unsigned int pref_dst_mode __initdata;
+ static unsigned int pref_src_mode __cpuinitdata;
+ static unsigned int pref_dst_mode __cpuinitdata;

- static int load_offset __initdata;
- static int store_offset __initdata;
+ static int load_offset __cpuinitdata;
+ static int store_offset __cpuinitdata;

- static unsigned int __initdata *dest, *epc;
+ static unsigned int __cpuinitdata *dest, *epc;

static unsigned int instruction_pending;
static union mips_instruction delayed_mi;

- static void __init emit_instruction(union mips_instruction mi)
+ static void __cpuinit emit_instruction(union mips_instruction mi)
{
if (instruction_pending)
*epc++ = delayed_mi.word;
@@ -222,7 +222,7 @@ static inline void build_cdex_p(void)
emit_instruction(mi);
}

- static void __init __build_store_reg(int reg)
+ static void __cpuinit __build_store_reg(int reg)
{
union mips_instruction mi;
unsigned int width;
@@ -339,7 +339,7 @@ static inline void build_jr_ra(void)
flush_delay_slot_or_nop();
}

- void __init build_clear_page(void)
+ void __cpuinit build_clear_page(void)
{
unsigned int loop_start;
unsigned long off;
@@ -442,7 +442,7 @@ dest = label();
pr_debug("\t.set pop\n");
}

- void __init build_copy_page(void)
+ void __cpuinit build_copy_page(void)
{
unsigned int loop_start;
unsigned long off;
@@ -293,10 +293,10 @@ void copy_page(void *to, void *from)
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);

- void __init build_clear_page(void)
+ void __cpuinit build_clear_page(void)
{
}

- void __init build_copy_page(void)
+ void __cpuinit build_copy_page(void)
{
}
@@ -168,7 +168,7 @@ struct bcache_ops indy_sc_ops = {
.bc_inv = indy_sc_wback_invalidate
};

- void __init indy_sc_init(void)
+ void __cpuinit indy_sc_init(void)
{
if (indy_sc_probe()) {
indy_sc_enable();
@@ -100,7 +100,7 @@ static inline int __init mips_sc_probe(void)
return 1;
}

- int __init mips_sc_init(void)
+ int __cpuinit mips_sc_init(void)
{
int found = mips_sc_probe();
if (found) {
@@ -109,4 +109,3 @@ int __init mips_sc_init(void)
}
return found;
}
-
@@ -99,7 +99,7 @@ static struct bcache_ops r5k_sc_ops = {
.bc_inv = r5k_dma_cache_inv_sc
};

- void __init r5k_sc_init(void)
+ void __cpuinit r5k_sc_init(void)
{
if (r5k_sc_probe()) {
r5k_sc_enable();
@@ -128,7 +128,7 @@ struct bcache_ops rm7k_sc_ops = {
.bc_inv = rm7k_sc_inv
};

- void __init rm7k_sc_init(void)
+ void __cpuinit rm7k_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config();
@@ -281,7 +281,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
}
}

- void __init tlb_init(void)
+ void __cpuinit tlb_init(void)
{
local_flush_tlb_all();
@@ -388,7 +388,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
* lifetime of the system
*/

- static int temp_tlb_entry __initdata;
+ static int temp_tlb_entry __cpuinitdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
@@ -427,7 +427,7 @@ out:
return ret;
}

- static void __init probe_tlb(unsigned long config)
+ static void __cpuinit probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int reg;
@@ -455,7 +455,7 @@ static void __init probe_tlb(unsigned long config)
c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

- static int __initdata ntlb = 0;
+ static int __cpuinitdata ntlb = 0;
static int __init set_ntlb(char *str)
{
get_option(&str, &ntlb);
@@ -464,7 +464,7 @@ static int __init set_ntlb(char *str)

__setup("ntlb=", set_ntlb);

- void __init tlb_init(void)
+ void __cpuinit tlb_init(void)
{
unsigned int config = read_c0_config();

@@ -473,7 +473,7 @@ void __init tlb_init(void)
* - On R4600 1.7 the tlbp never hits for pages smaller than
* the value in the c0_pagemask register.
* - The entire mm handling assumes the c0_pagemask register to
- * be set for 4kb pages.
+ * be set to fixed-size pages.
*/
probe_tlb(config);
write_c0_pagemask(PM_DEFAULT_MASK);
@@ -214,14 +214,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_restore(flags);
}

- static void __init probe_tlb(unsigned long config)
+ static void __cpuinit probe_tlb(unsigned long config)
{
struct cpuinfo_mips *c = &current_cpu_data;

c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
}

- void __init tlb_init(void)
+ void __cpuinit tlb_init(void)
{
unsigned int config = read_c0_config();
unsigned long status;
@@ -60,7 +60,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
* why; it's not an issue caused by the core RTL.
*
*/
- static int __init m4kc_tlbp_war(void)
+ static int __cpuinit m4kc_tlbp_war(void)
{
return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -144,16 +144,16 @@ static inline void dump_handler(const u32 *handler, int count)
* We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic.
*/
- static u32 tlb_handler[128] __initdata;
+ static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
- static struct uasm_label labels[128] __initdata;
- static struct uasm_reloc relocs[128] __initdata;
+ static struct uasm_label labels[128] __cpuinitdata;
+ static struct uasm_reloc relocs[128] __cpuinitdata;

/*
* The R3000 TLB handler is simple.
*/
- static void __init build_r3000_tlb_refill_handler(void)
+ static void __cpuinit build_r3000_tlb_refill_handler(void)
{
long pgdc = (long)pgd_current;
u32 *p;
@@ -197,7 +197,7 @@ static void __init build_r3000_tlb_refill_handler(void)
* other one.To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed.
*/
- static u32 final_handler[64] __initdata;
+ static u32 final_handler[64] __cpuinitdata;

/*
* Hazards
@@ -221,7 +221,7 @@ static u32 final_handler[64] __initdata;
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
- static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
+ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0 needs this, too. */
@@ -245,7 +245,7 @@ static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
*/
enum tlb_write_entry { tlb_random, tlb_indexed };

- static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
+ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r,
enum tlb_write_entry wmode)
{
@@ -389,7 +389,7 @@ static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry.
*/
- static void __init
+ static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr)
{
@@ -450,7 +450,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc.
*/
- static void __init
+ static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr)
{
@@ -522,7 +522,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry.
*/
- static void __init __maybe_unused
+ static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
long pgdc = (long)pgd_current;
@@ -557,7 +557,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)

#endif /* !CONFIG_64BIT */

- static void __init build_adjust_context(u32 **p, unsigned int ctx)
+ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -583,7 +583,7 @@ static void __init build_adjust_context(u32 **p, unsigned int ctx)
uasm_i_andi(p, ctx, ctx, mask);
}

- static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
/*
* Bug workaround for the Nevada. It seems as if under certain
@@ -608,7 +608,7 @@ static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

- static void __init build_update_entries(u32 **p, unsigned int tmp,
+ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
unsigned int ptep)
{
/*
@@ -651,7 +651,7 @@ static void __init build_update_entries(u32 **p, unsigned int tmp,
#endif
}

- static void __init build_r4000_tlb_refill_handler(void)
+ static void __cpuinit build_r4000_tlb_refill_handler(void)
{
u32 *p = tlb_handler;
struct uasm_label *l = labels;
@@ -783,7 +783,7 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

- static void __init
+ static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
@@ -803,7 +803,7 @@ iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
#endif
}

- static void __init
+ static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode)
{
@@ -863,7 +863,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
* the page table where this PTE is located, PTE will be re-loaded
* with it's original value.
*/
- static void __init
+ static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
@@ -874,7 +874,7 @@ build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}

/* Make PTE valid, store result in PTR. */
- static void __init
+ static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
@@ -887,7 +887,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be written to, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
- static void __init
+ static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
@@ -900,7 +900,7 @@ build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Make PTE writable, update software status bits as well, then store
* at PTR.
*/
- static void __init
+ static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
{
@@ -914,7 +914,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be modified, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done.
*/
- static void __init
+ static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid)
{
@@ -931,7 +931,7 @@ build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns.
*/
- static void __init
+ static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -947,7 +947,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
* may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns.
*/
- static void __init
+ static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int tmp)
@@ -965,7 +965,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
uasm_i_rfe(p); /* branch delay */
}

- static void __init
+ static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr)
{
@@ -985,7 +985,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
uasm_i_tlbp(p); /* load delay */
}

- static void __init build_r3000_tlb_load_handler(void)
+ static void __cpuinit build_r3000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
struct uasm_label *l = labels;
@@ -1015,7 +1015,7 @@ static void __init build_r3000_tlb_load_handler(void)
dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

- static void __init build_r3000_tlb_store_handler(void)
+ static void __cpuinit build_r3000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
struct uasm_label *l = labels;
@@ -1045,7 +1045,7 @@ static void __init build_r3000_tlb_store_handler(void)
dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

- static void __init build_r3000_tlb_modify_handler(void)
+ static void __cpuinit build_r3000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
@@ -1078,7 +1078,7 @@ static void __init build_r3000_tlb_modify_handler(void)
/*
* R4000 style TLB load/store/modify handlers.
*/
- static void __init
+ static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte,
unsigned int ptr)
@@ -1103,7 +1103,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
build_tlb_probe_entry(p);
}

- static void __init
+ static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr)
@@ -1120,7 +1120,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
#endif
}

- static void __init build_r4000_tlb_load_handler(void)
+ static void __cpuinit build_r4000_tlb_load_handler(void)
{
u32 *p = handle_tlbl;
struct uasm_label *l = labels;
@@ -1160,7 +1160,7 @@ static void __init build_r4000_tlb_load_handler(void)
dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

- static void __init build_r4000_tlb_store_handler(void)
+ static void __cpuinit build_r4000_tlb_store_handler(void)
{
u32 *p = handle_tlbs;
struct uasm_label *l = labels;
@@ -1191,7 +1191,7 @@ static void __init build_r4000_tlb_store_handler(void)
dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

- static void __init build_r4000_tlb_modify_handler(void)
+ static void __cpuinit build_r4000_tlb_modify_handler(void)
{
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
@@ -1223,7 +1223,7 @@ static void __init build_r4000_tlb_modify_handler(void)
dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

- void __init build_tlb_refill_handler(void)
+ void __cpuinit build_tlb_refill_handler(void)
{
/*
* The refill handler is generated per-CPU, multi-node systems
@@ -1269,7 +1269,7 @@ void __init build_tlb_refill_handler(void)
}
}

- void __init flush_tlb_handlers(void)
+ void __cpuinit flush_tlb_handlers(void)
{
flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl + sizeof(handle_tlbl));
@@ -82,7 +82,7 @@ struct insn {
| (e) << RE_SH \
| (f) << FUNC_SH)

- static struct insn insn_table[] __initdata = {
+ static struct insn insn_table[] __cpuinitdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@@ -135,7 +135,7 @@ static struct insn insn_table[] __initdata = {

#undef M

- static inline __init u32 build_rs(u32 arg)
+ static inline __cpuinit u32 build_rs(u32 arg)
{
if (arg & ~RS_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -143,7 +143,7 @@ static inline __init u32 build_rs(u32 arg)
return (arg & RS_MASK) << RS_SH;
}

- static inline __init u32 build_rt(u32 arg)
+ static inline __cpuinit u32 build_rt(u32 arg)
{
if (arg & ~RT_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -151,7 +151,7 @@ static inline __init u32 build_rt(u32 arg)
return (arg & RT_MASK) << RT_SH;
}

- static inline __init u32 build_rd(u32 arg)
+ static inline __cpuinit u32 build_rd(u32 arg)
{
if (arg & ~RD_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -159,7 +159,7 @@ static inline __init u32 build_rd(u32 arg)
return (arg & RD_MASK) << RD_SH;
}

- static inline __init u32 build_re(u32 arg)
+ static inline __cpuinit u32 build_re(u32 arg)
{
if (arg & ~RE_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -167,7 +167,7 @@ static inline __init u32 build_re(u32 arg)
return (arg & RE_MASK) << RE_SH;
}

- static inline __init u32 build_simm(s32 arg)
+ static inline __cpuinit u32 build_simm(s32 arg)
{
if (arg > 0x7fff || arg < -0x8000)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -175,7 +175,7 @@ static inline __init u32 build_simm(s32 arg)
return arg & 0xffff;
}

- static inline __init u32 build_uimm(u32 arg)
+ static inline __cpuinit u32 build_uimm(u32 arg)
{
if (arg & ~IMM_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -183,7 +183,7 @@ static inline __init u32 build_uimm(u32 arg)
return arg & IMM_MASK;
}

- static inline __init u32 build_bimm(s32 arg)
+ static inline __cpuinit u32 build_bimm(s32 arg)
{
if (arg > 0x1ffff || arg < -0x20000)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -194,7 +194,7 @@ static inline __init u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

- static inline __init u32 build_jimm(u32 arg)
+ static inline __cpuinit u32 build_jimm(u32 arg)
{
if (arg & ~((JIMM_MASK) << 2))
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -202,7 +202,7 @@ static inline __init u32 build_jimm(u32 arg)
return (arg >> 2) & JIMM_MASK;
}

- static inline __init u32 build_func(u32 arg)
+ static inline __cpuinit u32 build_func(u32 arg)
{
if (arg & ~FUNC_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -210,7 +210,7 @@ static inline __init u32 build_func(u32 arg)
return arg & FUNC_MASK;
}

- static inline __init u32 build_set(u32 arg)
+ static inline __cpuinit u32 build_set(u32 arg)
{
if (arg & ~SET_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n");
@@ -222,7 +222,7 @@ static inline __init u32 build_set(u32 arg)
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
*/
- static void __init build_insn(u32 **buf, enum opcode opc, ...)
+ static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
{
struct insn *ip = NULL;
unsigned int i;
@@ -375,14 +375,14 @@ I_u3u1u2(_xor)
I_u2u1u3(_xori)

/* Handle labels. */
- void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
+ void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}

- int __init uasm_in_compat_space_p(long addr)
+ int __cpuinit uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
@@ -392,7 +392,7 @@ int __init uasm_in_compat_space_p(long addr)
#endif
}

- int __init uasm_rel_highest(long val)
+ int __cpuinit uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -401,7 +401,7 @@ int __init uasm_rel_highest(long val)
#endif
}

- int __init uasm_rel_higher(long val)
+ int __cpuinit uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -410,17 +410,17 @@ int __init uasm_rel_higher(long val)
#endif
}

- int __init uasm_rel_hi(long val)
+ int __cpuinit uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

- int __init uasm_rel_lo(long val)
+ int __cpuinit uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

- void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
+ void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr));
@@ -436,7 +436,7 @@ void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}

- void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
+ void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) {
@@ -448,7 +448,7 @@ void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
}

/* Handle relocations. */
- void __init
+ void __cpuinit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
@@ -457,7 +457,7 @@ uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
(*rel)++;
}

- static inline void __init
+ static inline void __cpuinit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
long laddr = (long)lab->addr;
@@ -474,7 +474,7 @@ __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
}
}

- void __init
+ void __cpuinit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
struct uasm_label *l;
@@ -485,7 +485,7 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
__resolve_relocs(rel, l);
}

- void __init
+ void __cpuinit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
@@ -493,7 +493,7 @@ uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
rel->addr += off;
}

- void __init
+ void __cpuinit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
@@ -501,7 +501,7 @@ uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
lab->addr += off;
}

- void __init
+ void __cpuinit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target)
{
@@ -513,7 +513,7 @@ uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
uasm_move_labels(lab, first, end, off);
}

- int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
+ int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -526,49 +526,49 @@ int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
}

/* Convenience functions for labeled branches. */
- void __init
+ void __cpuinit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bltz(p, reg, 0);
}

- void __init
+ void __cpuinit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_b(p, 0);
}

- void __init
+ void __cpuinit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqz(p, reg, 0);
}

- void __init
+ void __cpuinit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqzl(p, reg, 0);
}

- void __init
+ void __cpuinit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bnez(p, reg, 0);
}

- void __init
+ void __cpuinit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgezl(p, reg, 0);
}

- void __init
+ void __cpuinit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
@@ -11,38 +11,38 @@
#include <linux/types.h>

#define Ip_u1u2u3(op) \
- void __init \
+ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)

#define Ip_u2u1u3(op) \
- void __init \
+ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)

#define Ip_u3u1u2(op) \
- void __init \
+ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)

#define Ip_u1u2s3(op) \
- void __init \
+ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)

#define Ip_u2s3u1(op) \
- void __init \
+ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)

#define Ip_u2u1s3(op) \
- void __init \
+ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)

#define Ip_u1u2(op) \
- void __init uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
+ void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)

#define Ip_u1s2(op) \
- void __init uasm_i##op(u32 **buf, unsigned int a, signed int b)
+ void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)

- #define Ip_u1(op) void __init uasm_i##op(u32 **buf, unsigned int a)
+ #define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)

- #define Ip_0(op) void __init uasm_i##op(u32 **buf)
+ #define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)

Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
@@ -98,19 +98,19 @@ struct uasm_label {
int lab;
};

- void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
+ void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
#ifdef CONFIG_64BIT
- int __init uasm_in_compat_space_p(long addr);
- int __init uasm_rel_highest(long val);
- int __init uasm_rel_higher(long val);
+ int uasm_in_compat_space_p(long addr);
+ int uasm_rel_highest(long val);
+ int uasm_rel_higher(long val);
#endif
- int __init uasm_rel_hi(long val);
- int __init uasm_rel_lo(long val);
- void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
- void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr);
+ int uasm_rel_hi(long val);
+ int uasm_rel_lo(long val);
+ void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+ void UASM_i_LA(u32 **buf, unsigned int rs, long addr);

#define UASM_L_LA(lb) \
- static inline void __init uasm_l##lb(struct uasm_label **lab, u32 *addr) \
+ static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
{ \
uasm_build_label(lab, addr, label##lb); \
}
@@ -164,29 +164,19 @@ struct uasm_reloc {
/* This is zero so we can use zeroed label arrays. */
#define UASM_LABEL_INVALID 0

- void __init uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
- void __init
- uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
- void __init
- uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
- void __init
- uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
- void __init
- uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
- u32 *end, u32 *target);
- int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
+ void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
+ void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
+ void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
+ void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
+ void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
+ u32 *first, u32 *end, u32 *target);
+ int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);

/* Convenience functions for labeled branches. */
- void __init
- uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
- void __init uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
- void __init
- uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
- void __init
- uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
- void __init
- uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
- void __init
- uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
- void __init
- uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+ void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+ void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+ void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+ void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+ void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+ void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
@@ -249,8 +249,9 @@ static int __init bcm1480_pcibios_init(void)
* XXX ehs: Should this happen in PCI Device mode?
*/

- set_io_port_base((unsigned long)
- ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536));
+ bcm1480_controller.io_map_base = (unsigned long)
+ ioremap(A_BCM1480_PHYS_PCI_IO_MATCH_BYTES, 65536);
+ set_io_port_base(bcm1480_controller.io_map_base);
isa_slot_offset = (unsigned long)
ioremap(A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES, 1024*1024);
@@ -40,7 +40,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];

extern struct pci_ops bridge_pci_ops;

- int __init bridge_probe(nasid_t nasid, int widget_id, int masterwid)
+ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{
unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc;
@@ -260,7 +260,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev,
}
}

- void pcibios_fixup_bus(struct pci_bus *bus)
+ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
/* Propagate hose info into the subordinate devices. */
@@ -7,10 +7,10 @@

#define LAUNCHSTACK_SIZE 256

- static __initdata DEFINE_SPINLOCK(launch_lock);
+ static __cpuinitdata DEFINE_SPINLOCK(launch_lock);

- static unsigned long secondary_sp __initdata;
- static unsigned long secondary_gp __initdata;
+ static unsigned long secondary_sp __cpuinitdata;
+ static unsigned long secondary_gp __cpuinitdata;

static unsigned char launchstack[LAUNCHSTACK_SIZE] __initdata
__attribute__((aligned(2 * sizeof(long))));
@@ -53,7 +53,7 @@ extern void pcibr_setup(cnodeid_t);

extern void xtalk_probe_node(cnodeid_t nid);

- static void __init per_hub_init(cnodeid_t cnode)
+ static void __cpuinit per_hub_init(cnodeid_t cnode)
{
struct hub_data *hub = hub_data(cnode);
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
@@ -285,7 +285,7 @@ void __cpuinit cpu_time_init(void)
set_c0_status(SRB_TIMOCLK);
}

- void __init hub_rtc_init(cnodeid_t cnode)
+ void __cpuinit hub_rtc_init(cnodeid_t cnode)
{
/*
* We only need to initialize the current node.
@@ -22,7 +22,7 @@

extern int bridge_probe(nasid_t nasid, int widget, int masterwid);

- static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
+ static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
{
widgetreg_t widget_id;
xwidget_part_num_t partnum;
@@ -46,7 +46,7 @@ static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
return 0;
}

- static int __init xbow_probe(nasid_t nasid)
+ static int __cpuinit xbow_probe(nasid_t nasid)
{
lboard_t *brd;
klxbow_t *xbow_p;
@@ -99,7 +99,7 @@ static int __init xbow_probe(nasid_t nasid)
return 0;
}

- void __init xtalk_probe_node(cnodeid_t nid)
+ void __cpuinit xtalk_probe_node(cnodeid_t nid)
{
volatile u64 hubreg;
nasid_t nasid;
@@ -93,7 +93,7 @@ extern void (*flush_data_cache_page)(unsigned long addr);
clear_bit(PG_dcache_dirty, &(page)->flags)

/* Run kernel code uncached, useful for cache probing functions. */
- unsigned long __init run_uncached(void *func);
+ unsigned long run_uncached(void *func);

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
@@ -39,7 +39,6 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
- #define PKMAP_BASE (0xfe000000UL)
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
@@ -34,7 +34,11 @@
#define cpu_has_64bits 1

#define cpu_has_4kex 1
+ #define cpu_has_3k_cache 0
+ #define cpu_has_6k_cache 0
#define cpu_has_4k_cache 1
+ #define cpu_has_8k_cache 0
+ #define cpu_has_tx39_cache 0

#define cpu_has_inclusive_pcaches 1
@@ -25,7 +25,7 @@ static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size,
{
#define TXX9_DIRECTMAP_BASE 0xff000000ul
if (offset >= TXX9_DIRECTMAP_BASE &&
- offset < TXX9_DIRECTMAP_BASE + 0xf0000)
+ offset < TXX9_DIRECTMAP_BASE + 0xff0000)
return (void __iomem *)offset;
return NULL;
}
@@ -1,7 +1,7 @@
#ifndef _ASM_MACH_LASAT_IRQ_H
#define _ASM_MACH_LASAT_IRQ_H

- #define LASAT_CASCADE_IRQ (MIPS_CPU_IRQ_BASE + 0)
+ #define LASAT_CASCADE_IRQ (MIPS_CPU_IRQ_BASE + 2)

#define LASAT_IRQ_BASE 8
#define LASAT_IRQ_END 23
@@ -65,6 +65,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,

#define VMALLOC_START MAP_BASE

+ #define PKMAP_BASE (0xfe000000UL)
+
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
@@ -64,10 +64,11 @@ static inline int mips_clockevent_init(void)
* Initialize the count register as a clocksource
*/
#ifdef CONFIG_CEVT_R4K
- extern void init_mips_clocksource(void);
+ extern int init_mips_clocksource(void);
#else
- static inline void init_mips_clocksource(void)
+ static inline int init_mips_clocksource(void)
{
+ return 0;
}
#endif