Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (25 commits)
  powerpc: Disable 64K hugetlb support when doing 64K SPU mappings
  powerpc/powermac: Fixup default serial port device for pmac_zilog
  powerpc/powermac: Use sane default baudrate for SCC debugging
  powerpc/mm: Implement _PAGE_SPECIAL & pte_special() for 64-bit
  powerpc: Show processor cache information in sysfs
  powerpc: Make core id information available to userspace
  powerpc: Make core sibling information available to userspace
  powerpc/vio: More fallout from dma_mapping_error API change
  ibmveth: Fix multiple errors with dma_mapping_error conversion
  powerpc/pseries: Fix CMO sysdev attribute API change fallout
  powerpc: Enable tracehook for the architecture
  powerpc: Add TIF_NOTIFY_RESUME support for tracehook
  powerpc: Add asm/syscall.h with the tracehook entry points
  powerpc: Make syscall tracing use tracehook.h helpers
  powerpc: Call tracehook_signal_handler() when setting up signal frames
  powerpc: Update cpu_sibling_maps dynamically
  powerpc: register_cpu_online should be __cpuinit
  powerpc: kill useless SMT code in prom_hold_cpus
  powerpc: Fix 8xx build failure
  powerpc: Fix vio build warnings
  ...
This commit is contained in:
commit d9089c296b
@ -117,6 +117,7 @@ config PPC
select HAVE_KPROBES
select HAVE_ARCH_KGDB
select HAVE_KRETPROBES
select HAVE_ARCH_TRACEHOOK
select HAVE_LMB
select HAVE_DMA_ATTRS if PPC64
select USE_GENERIC_SMP_HELPERS if SMP
@ -148,7 +148,7 @@ transfer_to_handler:
|
||||
/* Check to see if the dbcr0 register is set up to debug. Use the
|
||||
internal debug mode bit to do this. */
|
||||
lwz r12,THREAD_DBCR0(r12)
|
||||
andis. r12,r12,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
|
||||
andis. r12,r12,DBCR0_IDM@h
|
||||
beq+ 3f
|
||||
/* From user and task is ptraced - load up global dbcr0 */
|
||||
li r12,-1 /* clear all pending debug events */
|
||||
@ -292,7 +292,7 @@ syscall_exit_cont:
|
||||
/* If the process has its own DBCR0 value, load it up. The internal
|
||||
debug mode bit tells us that dbcr0 should be loaded. */
|
||||
lwz r0,THREAD+THREAD_DBCR0(r2)
|
||||
andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
|
||||
andis. r10,r0,DBCR0_IDM@h
|
||||
bnel- load_dbcr0
|
||||
#endif
|
||||
#ifdef CONFIG_44x
|
||||
@ -343,7 +343,12 @@ syscall_dotrace:
|
||||
stw r0,_TRAP(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl do_syscall_trace_enter
|
||||
lwz r0,GPR0(r1) /* Restore original registers */
|
||||
/*
|
||||
* Restore argument registers possibly just changed.
|
||||
* We use the return value of do_syscall_trace_enter
|
||||
* for call number to look up in the table (r0).
|
||||
*/
|
||||
mr r0,r3
|
||||
lwz r3,GPR3(r1)
|
||||
lwz r4,GPR4(r1)
|
||||
lwz r5,GPR5(r1)
|
||||
@ -720,7 +725,7 @@ restore_user:
|
||||
/* Check whether this process has its own DBCR0 value. The internal
|
||||
debug mode bit tells us that dbcr0 should be loaded. */
|
||||
lwz r0,THREAD+THREAD_DBCR0(r2)
|
||||
andis. r10,r0,(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W)@h
|
||||
andis. r10,r0,DBCR0_IDM@h
|
||||
bnel- load_dbcr0
|
||||
#endif
|
||||
|
||||
@ -1055,8 +1060,8 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
|
||||
SAVE_NVGPRS(r1)
|
||||
rlwinm r3,r3,0,0,30
|
||||
stw r3,_TRAP(r1)
|
||||
2: li r3,0
|
||||
addi r4,r1,STACK_FRAME_OVERHEAD
|
||||
2: addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
mr r4,r9
|
||||
bl do_signal
|
||||
REST_NVGPRS(r1)
|
||||
b recheck
|
||||
|
@ -214,7 +214,12 @@ syscall_dotrace:
|
||||
bl .save_nvgprs
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_syscall_trace_enter
|
||||
ld r0,GPR0(r1) /* Restore original registers */
|
||||
/*
|
||||
* Restore argument registers possibly just changed.
|
||||
* We use the return value of do_syscall_trace_enter
|
||||
* for the call number to look up in the table (r0).
|
||||
*/
|
||||
mr r0,r3
|
||||
ld r3,GPR3(r1)
|
||||
ld r4,GPR4(r1)
|
||||
ld r5,GPR5(r1)
|
||||
@ -638,8 +643,7 @@ user_work:
|
||||
b .ret_from_except_lite
|
||||
|
||||
1: bl .save_nvgprs
|
||||
li r3,0
|
||||
addi r4,r1,STACK_FRAME_OVERHEAD
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl .do_signal
|
||||
b .ret_from_except
|
||||
|
||||
|
@ -493,18 +493,18 @@ static int __init serial_dev_init(void)
|
||||
device_initcall(serial_dev_init);
|
||||
|
||||
|
||||
#ifdef CONFIG_SERIAL_8250_CONSOLE
|
||||
/*
* This is called very early, as part of console_init() (typically just after
* time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers, either
* one of the platform serial ports that have been probed earlier by
* find_legacy_serial_ports() or some more platform specific ones.
* default output with one of the available serial console drivers that have
* been probed earlier by find_legacy_serial_ports()
*/
|
||||
static int __init check_legacy_serial_console(void)
|
||||
{
|
||||
struct device_node *prom_stdout = NULL;
|
||||
int speed = 0, offset = 0;
|
||||
int i, speed = 0, offset = 0;
|
||||
const char *name;
|
||||
const u32 *spd;
|
||||
|
||||
@ -548,31 +548,20 @@ static int __init check_legacy_serial_console(void)
|
||||
if (spd)
|
||||
speed = *spd;
|
||||
|
||||
if (0)
|
||||
;
|
||||
#ifdef CONFIG_SERIAL_8250_CONSOLE
|
||||
else if (strcmp(name, "serial") == 0) {
|
||||
int i;
|
||||
/* Look for it in probed array */
|
||||
for (i = 0; i < legacy_serial_count; i++) {
|
||||
if (prom_stdout != legacy_serial_infos[i].np)
|
||||
continue;
|
||||
offset = i;
|
||||
speed = legacy_serial_infos[i].speed;
|
||||
break;
|
||||
}
|
||||
if (i >= legacy_serial_count)
|
||||
goto not_found;
|
||||
}
|
||||
#endif /* CONFIG_SERIAL_8250_CONSOLE */
|
||||
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
|
||||
else if (strcmp(name, "ch-a") == 0)
|
||||
offset = 0;
|
||||
else if (strcmp(name, "ch-b") == 0)
|
||||
offset = 1;
|
||||
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
|
||||
else
|
||||
if (strcmp(name, "serial") != 0)
|
||||
goto not_found;
|
||||
|
||||
/* Look for it in probed array */
|
||||
for (i = 0; i < legacy_serial_count; i++) {
|
||||
if (prom_stdout != legacy_serial_infos[i].np)
|
||||
continue;
|
||||
offset = i;
|
||||
speed = legacy_serial_infos[i].speed;
|
||||
break;
|
||||
}
|
||||
if (i >= legacy_serial_count)
|
||||
goto not_found;
|
||||
|
||||
of_node_put(prom_stdout);
|
||||
|
||||
DBG("Found serial console at ttyS%d\n", offset);
|
||||
@ -591,3 +580,4 @@ static int __init check_legacy_serial_console(void)
|
||||
}
|
||||
console_initcall(check_legacy_serial_console);
|
||||
|
||||
#endif /* CONFIG_SERIAL_8250_CONSOLE */
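As an illustration of the simplified lookup above (values assumed for the example, not taken from the patch): if the firmware's linux,stdout-path resolves to the device node that find_legacy_serial_ports() recorded as legacy_serial_infos[1], the loop sets offset = 1 and takes that port's probed speed, so the default console becomes ttyS1. The pmac_zilog "ch-a"/"ch-b" cases are no longer handled here; they move to platform code (see the powermac/setup.c hunk later in this merge).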
@ -254,7 +254,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
return;

/* Clear the DAC and struct entries. One shot trigger */
#if (defined(CONFIG_44x) || defined(CONFIG_BOOKE))
#if defined(CONFIG_BOOKE)
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
| DBCR0_IDM));
#endif
@ -286,7 +286,7 @@ int set_dabr(unsigned long dabr)
mtspr(SPRN_DABR, dabr);
#endif

#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
mtspr(SPRN_DAC1, dabr);
#endif

@ -373,7 +373,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
set_dabr(new->thread.dabr);

#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
/* If new thread DAC (HW breakpoint) is the same then leave it */
if (new->thread.dabr)
set_dabr(new->thread.dabr);
@ -568,7 +568,7 @@ void flush_thread(void)
current->thread.dabr = 0;
set_dabr(0);

#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
#if defined(CONFIG_BOOKE)
current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
}
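The recurring change in this file narrows the 44x/BOOKE guards to CONFIG_BOOKE and widens the DBCR0 masks to cover the DAC bits. For readers unfamiliar with the BookE debug registers, here is a minimal sketch of the arm/disarm pattern the hunks above implement; it uses only the SPR and bit names that appear in this diff, the function names are invented, and it is illustrative rather than a copy of the kernel code:

/* Arm a one-shot BookE data watchpoint on 'addr' (sketch only). */
static void booke_dac_arm(unsigned long addr)
{
	mtspr(SPRN_DAC1, addr);				/* address to compare */
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) |
		(DBCR0_IDM | DBSR_DAC1R | DBSR_DAC1W));	/* internal debug mode + R/W compare */
}

/* Disarm it again, as do_dabr() above does once the exception fires. */
static void booke_dac_disarm(void)
{
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) &
		~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM));
	mtspr(SPRN_DAC1, 0);
}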
@ -205,8 +205,6 @@ static int __initdata mem_reserve_cnt;
|
||||
static cell_t __initdata regbuf[1024];
|
||||
|
||||
|
||||
#define MAX_CPU_THREADS 2
|
||||
|
||||
/*
|
||||
* Error results ... some OF calls will return "-1" on error, some
|
||||
* will return 0, some will return either. To simplify, here are
|
||||
@ -1339,10 +1337,6 @@ static void __init prom_hold_cpus(void)
|
||||
unsigned int reg;
|
||||
phandle node;
|
||||
char type[64];
|
||||
int cpuid = 0;
|
||||
unsigned int interrupt_server[MAX_CPU_THREADS];
|
||||
unsigned int cpu_threads, hw_cpu_num;
|
||||
int propsize;
|
||||
struct prom_t *_prom = &RELOC(prom);
|
||||
unsigned long *spinloop
|
||||
= (void *) LOW_ADDR(__secondary_hold_spinloop);
|
||||
@ -1386,7 +1380,6 @@ static void __init prom_hold_cpus(void)
|
||||
reg = -1;
|
||||
prom_getprop(node, "reg", ®, sizeof(reg));
|
||||
|
||||
prom_debug("\ncpuid = 0x%x\n", cpuid);
|
||||
prom_debug("cpu hw idx = 0x%x\n", reg);
|
||||
|
||||
/* Init the acknowledge var which will be reset by
|
||||
@ -1395,28 +1388,9 @@ static void __init prom_hold_cpus(void)
|
||||
*/
|
||||
*acknowledge = (unsigned long)-1;
|
||||
|
||||
propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
|
||||
&interrupt_server,
|
||||
sizeof(interrupt_server));
|
||||
if (propsize < 0) {
|
||||
/* no property. old hardware has no SMT */
|
||||
cpu_threads = 1;
|
||||
interrupt_server[0] = reg; /* fake it with phys id */
|
||||
} else {
|
||||
/* We have a threaded processor */
|
||||
cpu_threads = propsize / sizeof(u32);
|
||||
if (cpu_threads > MAX_CPU_THREADS) {
|
||||
prom_printf("SMT: too many threads!\n"
|
||||
"SMT: found %x, max is %x\n",
|
||||
cpu_threads, MAX_CPU_THREADS);
|
||||
cpu_threads = 1; /* ToDo: panic? */
|
||||
}
|
||||
}
|
||||
|
||||
hw_cpu_num = interrupt_server[0];
|
||||
if (hw_cpu_num != _prom->cpu) {
|
||||
if (reg != _prom->cpu) {
|
||||
/* Primary Thread of non-boot cpu */
|
||||
prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
|
||||
prom_printf("starting cpu hw idx %x... ", reg);
|
||||
call_prom("start-cpu", 3, 0, node,
|
||||
secondary_hold, reg);
|
||||
|
||||
@ -1431,17 +1405,10 @@ static void __init prom_hold_cpus(void)
|
||||
}
|
||||
#ifdef CONFIG_SMP
|
||||
else
|
||||
prom_printf("%x : boot cpu %x\n", cpuid, reg);
|
||||
prom_printf("boot cpu hw idx %x\n", reg);
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/* Reserve cpu #s for secondary threads. They start later. */
|
||||
cpuid += cpu_threads;
|
||||
}
|
||||
|
||||
if (cpuid > NR_CPUS)
|
||||
prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
|
||||
") exceeded: ignoring extras\n");
|
||||
|
||||
prom_debug("prom_hold_cpus: end...\n");
|
||||
}
|
||||
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/regset.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/security.h>
|
||||
@ -717,7 +718,7 @@ void user_disable_single_step(struct task_struct *task)
|
||||
struct pt_regs *regs = task->thread.regs;
|
||||
|
||||
|
||||
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
|
||||
#if defined(CONFIG_BOOKE)
|
||||
/* If DAC then do not single step, skip */
|
||||
if (task->thread.dabr)
|
||||
return;
|
||||
@ -744,10 +745,11 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
|
||||
if (addr > 0)
|
||||
return -EINVAL;
|
||||
|
||||
/* The bottom 3 bits in dabr are flags */
|
||||
if ((data & ~0x7UL) >= TASK_SIZE)
|
||||
return -EIO;
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
#ifndef CONFIG_BOOKE
|
||||
|
||||
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
|
||||
* It was assumed, on previous implementations, that 3 bits were
|
||||
@ -769,7 +771,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
|
||||
task->thread.dabr = data;
|
||||
|
||||
#endif
|
||||
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
|
||||
#if defined(CONFIG_BOOKE)
|
||||
|
||||
/* As described above, it was assumed 3 bits were passed with the data
|
||||
* address, but we will assume only the mode bits will be passed
|
||||
@ -1013,31 +1015,24 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void do_syscall_trace(void)
|
||||
/*
|
||||
* We must return the syscall number to actually look up in the table.
|
||||
* This can be -1L to skip running any syscall at all.
|
||||
*/
|
||||
long do_syscall_trace_enter(struct pt_regs *regs)
|
||||
{
|
||||
/* the 0x80 provides a way for the tracing parent to distinguish
|
||||
between a syscall stop and SIGTRAP delivery */
|
||||
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
|
||||
? 0x80 : 0));
|
||||
long ret = 0;
|
||||
|
||||
/*
|
||||
* this isn't the same as continuing with a signal, but it will do
|
||||
* for normal use. strace only continues with a signal if the
|
||||
* stopping signal is not SIGTRAP. -brl
|
||||
*/
|
||||
if (current->exit_code) {
|
||||
send_sig(current->exit_code, current, 1);
|
||||
current->exit_code = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void do_syscall_trace_enter(struct pt_regs *regs)
|
||||
{
|
||||
secure_computing(regs->gpr[0]);
|
||||
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACE)
|
||||
&& (current->ptrace & PT_PTRACED))
|
||||
do_syscall_trace();
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
|
||||
tracehook_report_syscall_entry(regs))
|
||||
/*
|
||||
* Tracing decided this syscall should not happen.
|
||||
* We'll return a bogus call number to get an ENOSYS
|
||||
* error, but leave the original number in regs->gpr[0].
|
||||
*/
|
||||
ret = -1L;
|
||||
|
||||
if (unlikely(current->audit_context)) {
|
||||
#ifdef CONFIG_PPC64
|
||||
@ -1055,16 +1050,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
|
||||
regs->gpr[5] & 0xffffffff,
|
||||
regs->gpr[6] & 0xffffffff);
|
||||
}
|
||||
|
||||
return ret ?: regs->gpr[0];
|
||||
}
|
||||
|
||||
void do_syscall_trace_leave(struct pt_regs *regs)
|
||||
{
|
||||
int step;
|
||||
|
||||
if (unlikely(current->audit_context))
|
||||
audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
|
||||
regs->result);
|
||||
|
||||
if ((test_thread_flag(TIF_SYSCALL_TRACE)
|
||||
|| test_thread_flag(TIF_SINGLESTEP))
|
||||
&& (current->ptrace & PT_PTRACED))
|
||||
do_syscall_trace();
|
||||
step = test_thread_flag(TIF_SINGLESTEP);
|
||||
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
tracehook_report_syscall_exit(regs, step);
|
||||
}
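Taken together with the syscall_dotrace hunks in entry_32.S/entry_64.S earlier in this merge, the new contract is that do_syscall_trace_enter() returns the syscall number to dispatch, or -1L to suppress the call. Rendered as C for readability (a sketch under that assumption; the function name is invented, NR_syscalls is the arch's syscall count, and the real consumer is the assembly):

/* Sketch of how the entry code consumes the new return value (illustrative
 * C only; the actual implementation is the syscall_dotrace assembly). */
static long traced_syscall_number(struct pt_regs *regs)
{
	long callno = do_syscall_trace_enter(regs);	/* may be -1L */

	if (callno < 0 || callno >= NR_syscalls)
		return -ENOSYS;	/* bogus number; regs->gpr[0] keeps the original */

	/* r3..r8 are reloaded from the saved GPRs before dispatch, because
	 * the tracer may have rewritten the arguments. */
	return callno;
}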
@ -367,7 +367,6 @@ static void __init cpu_init_thread_core_maps(int tpc)
|
||||
* setup_cpu_maps - initialize the following cpu maps:
|
||||
* cpu_possible_map
|
||||
* cpu_present_map
|
||||
* cpu_sibling_map
|
||||
*
|
||||
* Having the possible map set up early allows us to restrict allocations
|
||||
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
|
||||
@ -475,29 +474,6 @@ void __init smp_setup_cpu_maps(void)
|
||||
*/
|
||||
cpu_init_thread_core_maps(nthreads);
|
||||
}
|
||||
|
||||
/*
|
||||
* Being that cpu_sibling_map is now a per_cpu array, then it cannot
|
||||
* be initialized until the per_cpu areas have been created. This
|
||||
* function is now called from setup_per_cpu_areas().
|
||||
*/
|
||||
void __init smp_setup_cpu_sibling_map(void)
|
||||
{
|
||||
#ifdef CONFIG_PPC64
|
||||
int i, cpu, base;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
DBG("Sibling map for CPU %d:", cpu);
|
||||
base = cpu_first_thread_in_core(cpu);
|
||||
for (i = 0; i < threads_per_core; i++) {
|
||||
cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
|
||||
DBG(" %d", base + i);
|
||||
}
|
||||
DBG("\n");
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
#ifdef CONFIG_PCSPKR_PLATFORM
|
||||
|
@ -611,9 +611,6 @@ void __init setup_per_cpu_areas(void)
|
||||
paca[i].data_offset = ptr - __per_cpu_start;
|
||||
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
|
||||
}
|
||||
|
||||
/* Now that per_cpu is setup, initialize cpu_sibling_map */
|
||||
smp_setup_cpu_sibling_map();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -9,7 +9,7 @@
|
||||
* this archive for more details.
|
||||
*/
|
||||
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <linux/signal.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
@ -112,7 +112,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
|
||||
}
|
||||
}
|
||||
|
||||
int do_signal(sigset_t *oldset, struct pt_regs *regs)
|
||||
static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
|
||||
{
|
||||
siginfo_t info;
|
||||
int signr;
|
||||
@ -147,7 +147,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
|
||||
*/
|
||||
if (current->thread.dabr) {
|
||||
set_dabr(current->thread.dabr);
|
||||
#if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
|
||||
#if defined(CONFIG_BOOKE)
|
||||
mtspr(SPRN_DBCR0, current->thread.dbcr0);
|
||||
#endif
|
||||
}
|
||||
@ -177,11 +177,28 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
|
||||
* its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
|
||||
*/
|
||||
current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
|
||||
|
||||
/*
|
||||
* Let tracing know that we've done the handler setup.
|
||||
*/
|
||||
tracehook_signal_handler(signr, &info, &ka, regs,
|
||||
test_thread_flag(TIF_SINGLESTEP));
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void do_signal(struct pt_regs *regs, unsigned long thread_info_flags)
|
||||
{
|
||||
if (thread_info_flags & _TIF_SIGPENDING)
|
||||
do_signal_pending(NULL, regs);
|
||||
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
||||
clear_thread_flag(TIF_NOTIFY_RESUME);
|
||||
tracehook_notify_resume(regs);
|
||||
}
|
||||
}
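For example (an assumed scenario, for illustration only): if the tracer has set TIF_NOTIFY_RESUME but no signal is pending, the new entry point only clears the flag and calls tracehook_notify_resume(regs); no signal frame is built and do_signal_pending() is never entered.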
|
||||
|
||||
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
|
||||
unsigned long r5, unsigned long r6, unsigned long r7,
|
||||
unsigned long r8, struct pt_regs *regs)
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include <asm/smp.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/cputhreads.h>
|
||||
#include <asm/cputable.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/mpic.h>
|
||||
@ -62,10 +63,12 @@ struct thread_info *secondary_ti;
|
||||
cpumask_t cpu_possible_map = CPU_MASK_NONE;
|
||||
cpumask_t cpu_online_map = CPU_MASK_NONE;
|
||||
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
|
||||
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
|
||||
|
||||
EXPORT_SYMBOL(cpu_online_map);
|
||||
EXPORT_SYMBOL(cpu_possible_map);
|
||||
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
|
||||
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
|
||||
|
||||
/* SMP operations for this machine */
|
||||
struct smp_ops_t *smp_ops;
|
||||
@ -228,6 +231,8 @@ void __devinit smp_prepare_boot_cpu(void)
|
||||
BUG_ON(smp_processor_id() != boot_cpuid);
|
||||
|
||||
cpu_set(boot_cpuid, cpu_online_map);
|
||||
cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
|
||||
cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
|
||||
#ifdef CONFIG_PPC64
|
||||
paca[boot_cpuid].__current = current;
|
||||
#endif
|
||||
@ -375,11 +380,60 @@ int __cpuinit __cpu_up(unsigned int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Return the value of the reg property corresponding to the given
|
||||
* logical cpu.
|
||||
*/
|
||||
int cpu_to_core_id(int cpu)
|
||||
{
|
||||
struct device_node *np;
|
||||
const int *reg;
|
||||
int id = -1;
|
||||
|
||||
np = of_get_cpu_node(cpu, NULL);
|
||||
if (!np)
|
||||
goto out;
|
||||
|
||||
reg = of_get_property(np, "reg", NULL);
|
||||
if (!reg)
|
||||
goto out;
|
||||
|
||||
id = *reg;
|
||||
out:
|
||||
of_node_put(np);
|
||||
return id;
|
||||
}
|
||||
|
||||
/* Must be called when no change can occur to cpu_present_map,
|
||||
* i.e. during cpu online or offline.
|
||||
*/
|
||||
static struct device_node *cpu_to_l2cache(int cpu)
|
||||
{
|
||||
struct device_node *np;
|
||||
const phandle *php;
|
||||
phandle ph;
|
||||
|
||||
if (!cpu_present(cpu))
|
||||
return NULL;
|
||||
|
||||
np = of_get_cpu_node(cpu, NULL);
|
||||
if (np == NULL)
|
||||
return NULL;
|
||||
|
||||
php = of_get_property(np, "l2-cache", NULL);
|
||||
if (php == NULL)
|
||||
return NULL;
|
||||
ph = *php;
|
||||
of_node_put(np);
|
||||
|
||||
return of_find_node_by_phandle(ph);
|
||||
}
|
||||
|
||||
/* Activate a secondary processor. */
|
||||
int __devinit start_secondary(void *unused)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct device_node *l2_cache;
|
||||
int i, base;
|
||||
|
||||
atomic_inc(&init_mm.mm_count);
|
||||
current->active_mm = &init_mm;
|
||||
@ -400,6 +454,33 @@ int __devinit start_secondary(void *unused)
|
||||
|
||||
ipi_call_lock();
|
||||
cpu_set(cpu, cpu_online_map);
|
||||
/* Update sibling maps */
|
||||
base = cpu_first_thread_in_core(cpu);
|
||||
for (i = 0; i < threads_per_core; i++) {
|
||||
if (cpu_is_offline(base + i))
|
||||
continue;
|
||||
cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
|
||||
cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));
|
||||
|
||||
/* cpu_core_map should be a superset of
|
||||
* cpu_sibling_map even if we don't have cache
|
||||
* information, so update the former here, too.
|
||||
*/
|
||||
cpu_set(cpu, per_cpu(cpu_core_map, base +i));
|
||||
cpu_set(base + i, per_cpu(cpu_core_map, cpu));
|
||||
}
|
||||
l2_cache = cpu_to_l2cache(cpu);
|
||||
for_each_online_cpu(i) {
|
||||
struct device_node *np = cpu_to_l2cache(i);
|
||||
if (!np)
|
||||
continue;
|
||||
if (np == l2_cache) {
|
||||
cpu_set(cpu, per_cpu(cpu_core_map, i));
|
||||
cpu_set(i, per_cpu(cpu_core_map, cpu));
|
||||
}
|
||||
of_node_put(np);
|
||||
}
|
||||
of_node_put(l2_cache);
|
||||
ipi_call_unlock();
|
||||
|
||||
local_irq_enable();
|
||||
@ -437,10 +518,42 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
int __cpu_disable(void)
|
||||
{
|
||||
if (smp_ops->cpu_disable)
|
||||
return smp_ops->cpu_disable();
|
||||
struct device_node *l2_cache;
|
||||
int cpu = smp_processor_id();
|
||||
int base, i;
|
||||
int err;
|
||||
|
||||
return -ENOSYS;
|
||||
if (!smp_ops->cpu_disable)
|
||||
return -ENOSYS;
|
||||
|
||||
err = smp_ops->cpu_disable();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Update sibling maps */
|
||||
base = cpu_first_thread_in_core(cpu);
|
||||
for (i = 0; i < threads_per_core; i++) {
|
||||
cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i));
|
||||
cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu));
|
||||
cpu_clear(cpu, per_cpu(cpu_core_map, base +i));
|
||||
cpu_clear(base + i, per_cpu(cpu_core_map, cpu));
|
||||
}
|
||||
|
||||
l2_cache = cpu_to_l2cache(cpu);
|
||||
for_each_present_cpu(i) {
|
||||
struct device_node *np = cpu_to_l2cache(i);
|
||||
if (!np)
|
||||
continue;
|
||||
if (np == l2_cache) {
|
||||
cpu_clear(cpu, per_cpu(cpu_core_map, i));
|
||||
cpu_clear(i, per_cpu(cpu_core_map, cpu));
|
||||
}
|
||||
of_node_put(np);
|
||||
}
|
||||
of_node_put(l2_cache);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __cpu_die(unsigned int cpu)
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/stacktrace.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/processor.h>
|
||||
|
||||
|
@ -22,6 +22,8 @@
|
||||
|
||||
static DEFINE_PER_CPU(struct cpu, cpu_devices);
|
||||
|
||||
static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
|
||||
|
||||
/* SMT stuff */
|
||||
|
||||
#ifdef CONFIG_PPC_MULTIPLATFORM
|
||||
@ -297,8 +299,289 @@ static struct sysdev_attribute pa6t_attrs[] = {
|
||||
#endif /* CONFIG_DEBUG_KERNEL */
|
||||
};
|
||||
|
||||
struct cache_desc {
|
||||
struct kobject kobj;
|
||||
struct cache_desc *next;
|
||||
const char *type; /* Instruction, Data, or Unified */
|
||||
u32 size; /* total cache size in KB */
|
||||
u32 line_size; /* in bytes */
|
||||
u32 nr_sets; /* number of sets */
|
||||
u32 level; /* e.g. 1, 2, 3... */
|
||||
u32 associativity; /* e.g. 8-way... 0 is fully associative */
|
||||
};
|
||||
|
||||
static void register_cpu_online(unsigned int cpu)
|
||||
DEFINE_PER_CPU(struct cache_desc *, cache_desc);
|
||||
|
||||
static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
|
||||
{
|
||||
return container_of(k, struct cache_desc, kobj);
|
||||
}
|
||||
|
||||
static void cache_desc_release(struct kobject *k)
|
||||
{
|
||||
struct cache_desc *desc = kobj_to_cache_desc(k);
|
||||
|
||||
pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
|
||||
|
||||
if (desc->next)
|
||||
kobject_put(&desc->next->kobj);
|
||||
|
||||
kfree(kobj_to_cache_desc(k));
|
||||
}
|
||||
|
||||
static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
|
||||
{
|
||||
struct kobj_attribute *kobj_attr;
|
||||
|
||||
kobj_attr = container_of(attr, struct kobj_attribute, attr);
|
||||
|
||||
return kobj_attr->show(k, kobj_attr, buf);
|
||||
}
|
||||
|
||||
static struct sysfs_ops cache_desc_sysfs_ops = {
|
||||
.show = cache_desc_show,
|
||||
};
|
||||
|
||||
static struct kobj_type cache_desc_type = {
|
||||
.release = cache_desc_release,
|
||||
.sysfs_ops = &cache_desc_sysfs_ops,
|
||||
};
|
||||
|
||||
static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cache_desc *cache = kobj_to_cache_desc(k);
|
||||
|
||||
return sprintf(buf, "%uK\n", cache->size);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cache_size_attr =
|
||||
__ATTR(size, 0444, cache_size_show, NULL);
|
||||
|
||||
static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cache_desc *cache = kobj_to_cache_desc(k);
|
||||
|
||||
return sprintf(buf, "%u\n", cache->line_size);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cache_line_size_attr =
|
||||
__ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
|
||||
|
||||
static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cache_desc *cache = kobj_to_cache_desc(k);
|
||||
|
||||
return sprintf(buf, "%u\n", cache->nr_sets);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cache_nr_sets_attr =
|
||||
__ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
|
||||
|
||||
static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cache_desc *cache = kobj_to_cache_desc(k);
|
||||
|
||||
return sprintf(buf, "%s\n", cache->type);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cache_type_attr =
|
||||
__ATTR(type, 0444, cache_type_show, NULL);
|
||||
|
||||
static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cache_desc *cache = kobj_to_cache_desc(k);
|
||||
|
||||
return sprintf(buf, "%u\n", cache->level);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cache_level_attr =
|
||||
__ATTR(level, 0444, cache_level_show, NULL);
|
||||
|
||||
static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct cache_desc *cache = kobj_to_cache_desc(k);
|
||||
|
||||
return sprintf(buf, "%u\n", cache->associativity);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cache_assoc_attr =
|
||||
__ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
|
||||
|
||||
struct cache_desc_info {
|
||||
const char *type;
|
||||
const char *size_prop;
|
||||
const char *line_size_prop;
|
||||
const char *nr_sets_prop;
|
||||
};
|
||||
|
||||
/* PowerPC Processor binding says the [di]-cache-* must be equal on
|
||||
* unified caches, so just use d-cache properties. */
|
||||
static struct cache_desc_info ucache_info = {
|
||||
.type = "Unified",
|
||||
.size_prop = "d-cache-size",
|
||||
.line_size_prop = "d-cache-line-size",
|
||||
.nr_sets_prop = "d-cache-sets",
|
||||
};
|
||||
|
||||
static struct cache_desc_info dcache_info = {
|
||||
.type = "Data",
|
||||
.size_prop = "d-cache-size",
|
||||
.line_size_prop = "d-cache-line-size",
|
||||
.nr_sets_prop = "d-cache-sets",
|
||||
};
|
||||
|
||||
static struct cache_desc_info icache_info = {
|
||||
.type = "Instruction",
|
||||
.size_prop = "i-cache-size",
|
||||
.line_size_prop = "i-cache-line-size",
|
||||
.nr_sets_prop = "i-cache-sets",
|
||||
};
|
||||
|
||||
static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
|
||||
{
|
||||
const u32 *cache_line_size;
|
||||
struct cache_desc *new;
|
||||
const u32 *cache_size;
|
||||
const u32 *nr_sets;
|
||||
int rc;
|
||||
|
||||
new = kzalloc(sizeof(*new), GFP_KERNEL);
|
||||
if (!new)
|
||||
return NULL;
|
||||
|
||||
rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
|
||||
"index%d", index);
|
||||
if (rc)
|
||||
goto err;
|
||||
|
||||
/* type */
|
||||
new->type = info->type;
|
||||
rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
|
||||
WARN_ON(rc);
|
||||
|
||||
/* level */
|
||||
new->level = level;
|
||||
rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
|
||||
WARN_ON(rc);
|
||||
|
||||
/* size */
|
||||
cache_size = of_get_property(np, info->size_prop, NULL);
|
||||
if (cache_size) {
|
||||
new->size = *cache_size / 1024;
|
||||
rc = sysfs_create_file(&new->kobj,
|
||||
&cache_size_attr.attr);
|
||||
WARN_ON(rc);
|
||||
}
|
||||
|
||||
/* coherency_line_size */
|
||||
cache_line_size = of_get_property(np, info->line_size_prop, NULL);
|
||||
if (cache_line_size) {
|
||||
new->line_size = *cache_line_size;
|
||||
rc = sysfs_create_file(&new->kobj,
|
||||
&cache_line_size_attr.attr);
|
||||
WARN_ON(rc);
|
||||
}
|
||||
|
||||
/* number_of_sets */
|
||||
nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
|
||||
if (nr_sets) {
|
||||
new->nr_sets = *nr_sets;
|
||||
rc = sysfs_create_file(&new->kobj,
|
||||
&cache_nr_sets_attr.attr);
|
||||
WARN_ON(rc);
|
||||
}
|
||||
|
||||
/* ways_of_associativity */
|
||||
if (new->nr_sets == 1) {
|
||||
/* fully associative */
|
||||
new->associativity = 0;
|
||||
goto create_assoc;
|
||||
}
|
||||
|
||||
if (new->nr_sets && new->size && new->line_size) {
|
||||
/* If we have values for all of these we can derive
|
||||
* the associativity. */
|
||||
new->associativity =
|
||||
((new->size * 1024) / new->nr_sets) / new->line_size;
|
||||
create_assoc:
|
||||
rc = sysfs_create_file(&new->kobj,
|
||||
&cache_assoc_attr.attr);
|
||||
WARN_ON(rc);
|
||||
}
|
||||
|
||||
return new;
|
||||
err:
|
||||
kfree(new);
|
||||
return NULL;
|
||||
}
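As a worked example of the derivation above (numbers invented for illustration): a 32 KB data cache with 128-byte lines and 64 sets reports ways_of_associativity = ((32 * 1024) / 64) / 128 = 4, i.e. 4-way set-associative, while nr_sets == 1 is reported as 0, meaning fully associative.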
|
||||
|
||||
static bool cache_is_unified(struct device_node *np)
|
||||
{
|
||||
return of_get_property(np, "cache-unified", NULL);
|
||||
}
|
||||
|
||||
static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
|
||||
{
|
||||
const phandle *next_cache_phandle;
|
||||
struct device_node *next_cache;
|
||||
struct cache_desc *new, **end;
|
||||
|
||||
pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
|
||||
|
||||
if (cache_is_unified(np)) {
|
||||
new = create_cache_desc(np, parent, index, level,
|
||||
&ucache_info);
|
||||
} else {
|
||||
new = create_cache_desc(np, parent, index, level,
|
||||
&dcache_info);
|
||||
if (new) {
|
||||
index++;
|
||||
new->next = create_cache_desc(np, parent, index, level,
|
||||
&icache_info);
|
||||
}
|
||||
}
|
||||
if (!new)
|
||||
return NULL;
|
||||
|
||||
end = &new->next;
|
||||
while (*end)
|
||||
end = &(*end)->next;
|
||||
|
||||
next_cache_phandle = of_get_property(np, "l2-cache", NULL);
|
||||
if (!next_cache_phandle)
|
||||
goto out;
|
||||
|
||||
next_cache = of_find_node_by_phandle(*next_cache_phandle);
|
||||
if (!next_cache)
|
||||
goto out;
|
||||
|
||||
*end = create_cache_index_info(next_cache, parent, ++index, ++level);
|
||||
|
||||
of_node_put(next_cache);
|
||||
out:
|
||||
return new;
|
||||
}
|
||||
|
||||
static void __cpuinit create_cache_info(struct sys_device *sysdev)
|
||||
{
|
||||
struct kobject *cache_toplevel;
|
||||
struct device_node *np = NULL;
|
||||
int cpu = sysdev->id;
|
||||
|
||||
cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
|
||||
if (!cache_toplevel)
|
||||
return;
|
||||
per_cpu(cache_toplevel, cpu) = cache_toplevel;
|
||||
np = of_get_cpu_node(cpu, NULL);
|
||||
if (np != NULL) {
|
||||
per_cpu(cache_desc, cpu) =
|
||||
create_cache_index_info(np, cache_toplevel, 0, 1);
|
||||
of_node_put(np);
|
||||
}
|
||||
return;
|
||||
}
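The net effect is a cache directory beneath each CPU's sysdev node. A plausible layout for a CPU with split L1 caches and a unified L2 (the paths follow the code above, the values are invented for illustration):

/sys/devices/system/cpu/cpu0/cache/index0/type     Data
/sys/devices/system/cpu/cpu0/cache/index0/level    1
/sys/devices/system/cpu/cpu0/cache/index0/size     32K
/sys/devices/system/cpu/cpu0/cache/index1/type     Instruction
/sys/devices/system/cpu/cpu0/cache/index1/level    1
/sys/devices/system/cpu/cpu0/cache/index2/type     Unified
/sys/devices/system/cpu/cpu0/cache/index2/level    2
/sys/devices/system/cpu/cpu0/cache/index2/size     512K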
|
||||
|
||||
static void __cpuinit register_cpu_online(unsigned int cpu)
|
||||
{
|
||||
struct cpu *c = &per_cpu(cpu_devices, cpu);
|
||||
struct sys_device *s = &c->sysdev;
|
||||
@ -346,9 +629,33 @@ static void register_cpu_online(unsigned int cpu)
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_DSCR))
|
||||
sysdev_create_file(s, &attr_dscr);
|
||||
|
||||
create_cache_info(s);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void remove_cache_info(struct sys_device *sysdev)
|
||||
{
|
||||
struct kobject *cache_toplevel;
|
||||
struct cache_desc *cache_desc;
|
||||
int cpu = sysdev->id;
|
||||
|
||||
cache_desc = per_cpu(cache_desc, cpu);
|
||||
if (cache_desc != NULL) {
|
||||
sysfs_remove_file(&cache_desc->kobj, &cache_size_attr.attr);
|
||||
sysfs_remove_file(&cache_desc->kobj, &cache_line_size_attr.attr);
|
||||
sysfs_remove_file(&cache_desc->kobj, &cache_type_attr.attr);
|
||||
sysfs_remove_file(&cache_desc->kobj, &cache_level_attr.attr);
|
||||
sysfs_remove_file(&cache_desc->kobj, &cache_nr_sets_attr.attr);
|
||||
sysfs_remove_file(&cache_desc->kobj, &cache_assoc_attr.attr);
|
||||
|
||||
kobject_put(&cache_desc->kobj);
|
||||
}
|
||||
cache_toplevel = per_cpu(cache_toplevel, cpu);
|
||||
if (cache_toplevel != NULL)
|
||||
kobject_put(cache_toplevel);
|
||||
}
|
||||
|
||||
static void unregister_cpu_online(unsigned int cpu)
|
||||
{
|
||||
struct cpu *c = &per_cpu(cpu_devices, cpu);
|
||||
@ -399,6 +706,8 @@ static void unregister_cpu_online(unsigned int cpu)
|
||||
|
||||
if (cpu_has_feature(CPU_FTR_DSCR))
|
||||
sysdev_remove_file(s, &attr_dscr);
|
||||
|
||||
remove_cache_info(s);
|
||||
}
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
||||
|
@ -530,7 +530,7 @@ static dma_addr_t vio_dma_iommu_map_single(struct device *dev, void *vaddr,
|
||||
}
|
||||
|
||||
ret = dma_iommu_ops.map_single(dev, vaddr, size, direction, attrs);
|
||||
if (unlikely(dma_mapping_error(ret))) {
|
||||
if (unlikely(dma_mapping_error(dev, ret))) {
|
||||
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
|
||||
atomic_inc(&viodev->cmo.allocs_failed);
|
||||
}
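Several hunks in this merge (the vio change above and the ibmveth fixes below) are fallout from dma_mapping_error() gaining a struct device argument. A minimal sketch of the new calling convention; the helper name and the dev/buf/len parameters are assumptions standing in for the calling driver's context:

/* Sketch of the new dma_mapping_error() convention used by these hunks. */
static int map_one_buffer(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))	/* device pointer is now required */
		return -EIO;			/* do not use *handle on failure */
	return 0;
}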
|
||||
@ -1031,8 +1031,8 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
|
||||
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
|
||||
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
|
||||
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
|
||||
static void vio_cmo_bus_init() {}
|
||||
static void vio_cmo_sysfs_init() { }
|
||||
static void vio_cmo_bus_init(void) {}
|
||||
static void vio_cmo_sysfs_init(void) { }
|
||||
#endif /* CONFIG_PPC_SMLPAR */
|
||||
EXPORT_SYMBOL(vio_cmo_entitlement_update);
|
||||
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
|
||||
|
@ -736,14 +736,21 @@ static int __init hugetlbpage_init(void)
|
||||
|
||||
if (!cpu_has_feature(CPU_FTR_16M_PAGE))
|
||||
return -ENODEV;
|
||||
|
||||
/* Add supported huge page sizes. Need to change HUGE_MAX_HSTATE
|
||||
* and adjust PTE_NONCACHE_NUM if the number of supported huge page
|
||||
* sizes changes.
|
||||
*/
|
||||
set_huge_psize(MMU_PAGE_16M);
|
||||
set_huge_psize(MMU_PAGE_64K);
|
||||
set_huge_psize(MMU_PAGE_16G);
|
||||
|
||||
/* Temporarily disable support for 64K huge pages when 64K SPU local
|
||||
* store support is enabled as the current implementation conflicts.
|
||||
*/
|
||||
#ifndef CONFIG_SPU_FS_64K_LS
|
||||
set_huge_psize(MMU_PAGE_64K);
|
||||
#endif
|
||||
|
||||
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
|
||||
if (mmu_huge_psizes[psize]) {
|
||||
huge_pgtable_cache(psize) = kmem_cache_create(
|
||||
|
@ -541,6 +541,78 @@ static int __init pmac_declare_of_platform_devices(void)
|
||||
}
|
||||
machine_device_initcall(powermac, pmac_declare_of_platform_devices);
|
||||
|
||||
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
|
||||
/*
* This is called very early, as part of console_init() (typically just after
* time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers.
*/
|
||||
static int __init check_pmac_serial_console(void)
|
||||
{
|
||||
struct device_node *prom_stdout = NULL;
|
||||
int offset = 0;
|
||||
const char *name;
|
||||
#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
|
||||
char *devname = "ttyS";
|
||||
#else
|
||||
char *devname = "ttyPZ";
|
||||
#endif
|
||||
|
||||
pr_debug(" -> check_pmac_serial_console()\n");
|
||||
|
||||
/* The user has requested a console so this is already set up. */
|
||||
if (strstr(boot_command_line, "console=")) {
|
||||
pr_debug(" console was specified !\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!of_chosen) {
|
||||
pr_debug(" of_chosen is NULL !\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* We are getting a weird phandle from OF ... */
|
||||
/* ... So use the full path instead */
|
||||
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
|
||||
if (name == NULL) {
|
||||
pr_debug(" no linux,stdout-path !\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
prom_stdout = of_find_node_by_path(name);
|
||||
if (!prom_stdout) {
|
||||
pr_debug(" can't find stdout package %s !\n", name);
|
||||
return -ENODEV;
|
||||
}
|
||||
pr_debug("stdout is %s\n", prom_stdout->full_name);
|
||||
|
||||
name = of_get_property(prom_stdout, "name", NULL);
|
||||
if (!name) {
|
||||
pr_debug(" stdout package has no name !\n");
|
||||
goto not_found;
|
||||
}
|
||||
|
||||
if (strcmp(name, "ch-a") == 0)
|
||||
offset = 0;
|
||||
else if (strcmp(name, "ch-b") == 0)
|
||||
offset = 1;
|
||||
else
|
||||
goto not_found;
|
||||
of_node_put(prom_stdout);
|
||||
|
||||
pr_debug("Found serial console at %s%d\n", devname, offset);
|
||||
|
||||
return add_preferred_console(devname, offset, NULL);
|
||||
|
||||
not_found:
|
||||
pr_debug("No preferred console found !\n");
|
||||
of_node_put(prom_stdout);
|
||||
return -ENODEV;
|
||||
}
|
||||
console_initcall(check_pmac_serial_console);
|
||||
|
||||
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
|
||||
|
||||
/*
|
||||
* Called very early, MMU is off, device-tree isn't unflattened
|
||||
*/
|
||||
|
@ -125,13 +125,23 @@ void udbg_scc_init(int force_scc)
|
||||
out_8(sccc, 0xc0);
|
||||
|
||||
/* If SCC was the OF output port, read the BRG value, else
|
||||
* Setup for 57600 8N1
|
||||
* Setup for 38400 or 57600 8N1 depending on the machine
|
||||
*/
|
||||
if (ch_def != NULL) {
|
||||
out_8(sccc, 13);
|
||||
scc_inittab[1] = in_8(sccc);
|
||||
out_8(sccc, 12);
|
||||
scc_inittab[3] = in_8(sccc);
|
||||
} else if (machine_is_compatible("RackMac1,1")
|
||||
|| machine_is_compatible("RackMac1,2")
|
||||
|| machine_is_compatible("MacRISC4")) {
|
||||
/* Xserves and G5s default to 57600 */
|
||||
scc_inittab[1] = 0;
|
||||
scc_inittab[3] = 0;
|
||||
} else {
|
||||
/* Others default to 38400 */
|
||||
scc_inittab[1] = 0;
|
||||
scc_inittab[3] = 1;
|
||||
}
|
||||
|
||||
for (i = 0; i < sizeof(scc_inittab); ++i)
|
||||
|
@ -289,7 +289,9 @@ static int cmm_thread(void *dummy)
|
||||
}
|
||||
|
||||
#define CMM_SHOW(name, format, args...) \
|
||||
static ssize_t show_##name(struct sys_device *dev, char *buf) \
|
||||
static ssize_t show_##name(struct sys_device *dev, \
|
||||
struct sysdev_attribute *attr, \
|
||||
char *buf) \
|
||||
{ \
|
||||
return sprintf(buf, format, ##args); \
|
||||
} \
|
||||
@ -298,12 +300,14 @@ static int cmm_thread(void *dummy)
|
||||
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(loaned_pages));
|
||||
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
|
||||
|
||||
static ssize_t show_oom_pages(struct sys_device *dev, char *buf)
|
||||
static ssize_t show_oom_pages(struct sys_device *dev,
|
||||
struct sysdev_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
|
||||
}
|
||||
|
||||
static ssize_t store_oom_pages(struct sys_device *dev,
|
||||
struct sysdev_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
unsigned long val = simple_strtoul (buf, NULL, 10);
|
||||
|
@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
|
||||
dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
|
||||
pool->buff_size, DMA_FROM_DEVICE);
|
||||
|
||||
if (dma_mapping_error((&adapter->vdev->dev, dma_addr))
|
||||
if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
|
||||
goto failure;
|
||||
|
||||
pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
|
||||
@ -294,7 +294,7 @@ failure:
|
||||
pool->consumer_index = pool->size - 1;
|
||||
else
|
||||
pool->consumer_index--;
|
||||
if (!dma_mapping_error((&adapter->vdev->dev, dma_addr))
|
||||
if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
|
||||
dma_unmap_single(&adapter->vdev->dev,
|
||||
pool->dma_addr[index], pool->buff_size,
|
||||
DMA_FROM_DEVICE);
|
||||
@ -488,7 +488,7 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
|
||||
&adapter->rx_buff_pool[i]);
|
||||
|
||||
if (adapter->bounce_buffer != NULL) {
|
||||
if (!dma_mapping_error(adapter->bounce_buffer_dma)) {
|
||||
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
|
||||
dma_unmap_single(&adapter->vdev->dev,
|
||||
adapter->bounce_buffer_dma,
|
||||
adapter->netdev->mtu + IBMVETH_BUFF_OH,
|
||||
@ -924,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
buf[1] = 0;
|
||||
}
|
||||
|
||||
if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) {
|
||||
if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
|
||||
if (!firmware_has_feature(FW_FEATURE_CMO))
|
||||
ibmveth_error_printk("tx: unable to map xmit buffer\n");
|
||||
skb_copy_from_linear_data(skb, adapter->bounce_buffer,
|
||||
|
@ -13,3 +13,9 @@ config OF_I2C
|
||||
depends on PPC_OF && I2C
|
||||
help
|
||||
OpenFirmware I2C accessors
|
||||
|
||||
config OF_SPI
|
||||
def_tristate SPI
|
||||
depends on OF && PPC_OF && SPI
|
||||
help
|
||||
OpenFirmware SPI accessors
|
||||
|
@ -2,3 +2,4 @@ obj-y = base.o
|
||||
obj-$(CONFIG_OF_DEVICE) += device.o platform.o
|
||||
obj-$(CONFIG_OF_GPIO) += gpio.o
|
||||
obj-$(CONFIG_OF_I2C) += of_i2c.o
|
||||
obj-$(CONFIG_OF_SPI) += of_spi.o
|
||||
|
@ -385,3 +385,91 @@ struct device_node *of_find_matching_node(struct device_node *from,
|
||||
return np;
|
||||
}
|
||||
EXPORT_SYMBOL(of_find_matching_node);
|
||||
|
||||
/**
|
||||
* of_modalias_table: Table of explicit compatible ==> modalias mappings
|
||||
*
|
||||
* This table allows particular compatible property values to be mapped
|
||||
* to modalias strings. This is useful for busses which do not directly
|
||||
* understand the OF device tree but are populated based on data contained
|
||||
* within the device tree. SPI and I2C are the two current users of this
|
||||
* table.
|
||||
*
|
||||
* In most cases, devices do not need to be listed in this table because
|
||||
* the modalias value can be derived directly from the compatible table.
|
||||
* However, if for any reason a value cannot be derived, then this table
|
||||
* provides a method to override the implicit derivation.
|
||||
*
|
||||
* At the moment, a single table is used for all bus types because it is
|
||||
* assumed that the data size is small and that the compatible values
|
||||
* should already be distinct enough to differentiate between SPI, I2C
|
||||
* and other devices.
|
||||
*/
|
||||
struct of_modalias_table {
|
||||
char *of_device;
|
||||
char *modalias;
|
||||
};
|
||||
static struct of_modalias_table of_modalias_table[] = {
|
||||
/* Empty for now; add entries as needed */
|
||||
};
|
||||
|
||||
/**
|
||||
* of_modalias_node - Lookup appropriate modalias for a device node
|
||||
* @node: pointer to a device tree node
|
||||
* @modalias: Pointer to buffer that modalias value will be copied into
|
||||
* @len: Length of modalias value
|
||||
*
|
||||
* Based on the value of the compatible property, this routine will determine
|
||||
* an appropriate modalias value for a particular device tree node. Three
|
||||
* separate methods are used to derive a modalias value.
|
||||
*
|
||||
* First method is to lookup the compatible value in of_modalias_table.
|
||||
* Second is to look for a "linux,<modalias>" entry in the compatible list
|
||||
* and use that for modalias. Third is to strip off the manufacturer
|
||||
* prefix from the first compatible entry and use the remainder as modalias
|
||||
*
|
||||
* This routine returns 0 on success
|
||||
*/
|
||||
int of_modalias_node(struct device_node *node, char *modalias, int len)
|
||||
{
|
||||
int i, cplen;
|
||||
const char *compatible;
|
||||
const char *p;
|
||||
|
||||
/* 1. search for exception list entry */
|
||||
for (i = 0; i < ARRAY_SIZE(of_modalias_table); i++) {
|
||||
compatible = of_modalias_table[i].of_device;
|
||||
if (!of_device_is_compatible(node, compatible))
|
||||
continue;
|
||||
strlcpy(modalias, of_modalias_table[i].modalias, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
compatible = of_get_property(node, "compatible", &cplen);
|
||||
if (!compatible)
|
||||
return -ENODEV;
|
||||
|
||||
/* 2. search for linux,<modalias> entry */
|
||||
p = compatible;
|
||||
while (cplen > 0) {
|
||||
if (!strncmp(p, "linux,", 6)) {
|
||||
p += 6;
|
||||
strlcpy(modalias, p, len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
i = strlen(p) + 1;
|
||||
p += i;
|
||||
cplen -= i;
|
||||
}
|
||||
|
||||
/* 3. take first compatible entry and strip manufacturer */
|
||||
p = strchr(compatible, ',');
|
||||
if (!p)
|
||||
return -ENODEV;
|
||||
p++;
|
||||
strlcpy(modalias, p, len);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_modalias_node);
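To make the three-step derivation concrete, a hypothetical example (node and compatible strings invented for illustration): a node with compatible = "acme,widget-spi", "linux,spidev" matches step 2 and yields the modalias "spidev"; without the "linux," entry, step 3 strips the "acme," prefix from the first compatible value and yields "widget-spi"; and an entry added to of_modalias_table[] would override either result. The of_i2c.c and of_spi.c changes in this merge then pass the result straight to request_module().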
|
||||
|
||||
|
@ -16,62 +16,6 @@
|
||||
#include <linux/of_i2c.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
struct i2c_driver_device {
|
||||
char *of_device;
|
||||
char *i2c_type;
|
||||
};
|
||||
|
||||
static struct i2c_driver_device i2c_devices[] = {
|
||||
};
|
||||
|
||||
static int of_find_i2c_driver(struct device_node *node,
|
||||
struct i2c_board_info *info)
|
||||
{
|
||||
int i, cplen;
|
||||
const char *compatible;
|
||||
const char *p;
|
||||
|
||||
/* 1. search for exception list entry */
|
||||
for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
|
||||
if (!of_device_is_compatible(node, i2c_devices[i].of_device))
|
||||
continue;
|
||||
if (strlcpy(info->type, i2c_devices[i].i2c_type,
|
||||
I2C_NAME_SIZE) >= I2C_NAME_SIZE)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
compatible = of_get_property(node, "compatible", &cplen);
|
||||
if (!compatible)
|
||||
return -ENODEV;
|
||||
|
||||
/* 2. search for linux,<i2c-type> entry */
|
||||
p = compatible;
|
||||
while (cplen > 0) {
|
||||
if (!strncmp(p, "linux,", 6)) {
|
||||
p += 6;
|
||||
if (strlcpy(info->type, p,
|
||||
I2C_NAME_SIZE) >= I2C_NAME_SIZE)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
i = strlen(p) + 1;
|
||||
p += i;
|
||||
cplen -= i;
|
||||
}
|
||||
|
||||
/* 3. take first compatible entry and strip manufacturer */
|
||||
p = strchr(compatible, ',');
|
||||
if (!p)
|
||||
return -ENODEV;
|
||||
p++;
|
||||
if (strlcpy(info->type, p, I2C_NAME_SIZE) >= I2C_NAME_SIZE)
|
||||
return -ENOMEM;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void of_register_i2c_devices(struct i2c_adapter *adap,
|
||||
struct device_node *adap_node)
|
||||
{
|
||||
@ -83,6 +27,9 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
|
||||
const u32 *addr;
|
||||
int len;
|
||||
|
||||
if (of_modalias_node(node, info.type, sizeof(info.type)) < 0)
|
||||
continue;
|
||||
|
||||
addr = of_get_property(node, "reg", &len);
|
||||
if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) {
|
||||
printk(KERN_ERR
|
||||
@ -92,11 +39,6 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
|
||||
|
||||
info.irq = irq_of_parse_and_map(node, 0);
|
||||
|
||||
if (of_find_i2c_driver(node, &info) < 0) {
|
||||
irq_dispose_mapping(info.irq);
|
||||
continue;
|
||||
}
|
||||
|
||||
info.addr = *addr;
|
||||
|
||||
request_module(info.type);
|
||||
|
drivers/of/of_spi.c (new file, 93 lines)
@ -0,0 +1,93 @@
|
||||
/*
|
||||
* SPI OF support routines
|
||||
* Copyright (C) 2008 Secret Lab Technologies Ltd.
|
||||
*
|
||||
* Support routines for deriving SPI device attachments from the device
|
||||
* tree.
|
||||
*/
|
||||
|
||||
#include <linux/of.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/of_spi.h>
|
||||
|
||||
/**
|
||||
* of_register_spi_devices - Register child devices onto the SPI bus
|
||||
* @master: Pointer to spi_master device
|
||||
* @np: parent node of SPI device nodes
|
||||
*
|
||||
* Registers an spi_device for each child node of 'np' which has a 'reg'
|
||||
* property.
|
||||
*/
|
||||
void of_register_spi_devices(struct spi_master *master, struct device_node *np)
|
||||
{
|
||||
struct spi_device *spi;
|
||||
struct device_node *nc;
|
||||
const u32 *prop;
|
||||
int rc;
|
||||
int len;
|
||||
|
||||
for_each_child_of_node(np, nc) {
|
||||
/* Alloc an spi_device */
|
||||
spi = spi_alloc_device(master);
|
||||
if (!spi) {
|
||||
dev_err(&master->dev, "spi_device alloc error for %s\n",
|
||||
nc->full_name);
|
||||
spi_dev_put(spi);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Select device driver */
|
||||
if (of_modalias_node(nc, spi->modalias,
|
||||
sizeof(spi->modalias)) < 0) {
|
||||
dev_err(&master->dev, "cannot find modalias for %s\n",
|
||||
nc->full_name);
|
||||
spi_dev_put(spi);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Device address */
|
||||
prop = of_get_property(nc, "reg", &len);
|
||||
if (!prop || len < sizeof(*prop)) {
|
||||
dev_err(&master->dev, "%s has no 'reg' property\n",
|
||||
nc->full_name);
|
||||
spi_dev_put(spi);
|
||||
continue;
|
||||
}
|
||||
spi->chip_select = *prop;
|
||||
|
||||
/* Mode (clock phase/polarity/etc.) */
|
||||
if (of_find_property(nc, "spi-cpha", NULL))
|
||||
spi->mode |= SPI_CPHA;
|
||||
if (of_find_property(nc, "spi-cpol", NULL))
|
||||
spi->mode |= SPI_CPOL;
|
||||
|
||||
/* Device speed */
|
||||
prop = of_get_property(nc, "spi-max-frequency", &len);
|
||||
if (!prop || len < sizeof(*prop)) {
|
||||
dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
|
||||
nc->full_name);
|
||||
spi_dev_put(spi);
|
||||
continue;
|
||||
}
|
||||
spi->max_speed_hz = *prop;
|
||||
|
||||
/* IRQ */
|
||||
spi->irq = irq_of_parse_and_map(nc, 0);
|
||||
|
||||
/* Store a pointer to the node in the device structure */
|
||||
of_node_get(nc);
|
||||
spi->dev.archdata.of_node = nc;
|
||||
|
||||
/* Register the new device */
|
||||
request_module(spi->modalias);
|
||||
rc = spi_add_device(spi);
|
||||
if (rc) {
|
||||
dev_err(&master->dev, "spi_device register error %s\n",
|
||||
nc->full_name);
|
||||
spi_dev_put(spi);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(of_register_spi_devices);
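A hedged sketch of how a controller driver could hook the new helper up once its spi_master is registered; the helper name, the of_device handle and the error handling are assumptions for illustration, not part of this patch:

/* Sketch: registering child SPI devices from the controller's OF node. */
static int example_register_children(struct of_device *op,
				     struct spi_master *master)
{
	int rc = spi_register_master(master);

	if (rc) {
		spi_master_put(master);
		return rc;
	}
	/* create an spi_device for each child node with a 'reg' property */
	of_register_spi_devices(master, op->node);
	return 0;
}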
|
@ -178,6 +178,96 @@ struct boardinfo {
|
||||
static LIST_HEAD(board_list);
|
||||
static DEFINE_MUTEX(board_lock);
|
||||
|
||||
/**
|
||||
* spi_alloc_device - Allocate a new SPI device
|
||||
* @master: Controller to which device is connected
|
||||
* Context: can sleep
|
||||
*
|
||||
* Allows a driver to allocate and initialize a spi_device without
|
||||
* registering it immediately. This allows a driver to directly
|
||||
* fill the spi_device with device parameters before calling
|
||||
* spi_add_device() on it.
|
||||
*
|
||||
* Caller is responsible to call spi_add_device() on the returned
|
||||
* spi_device structure to add it to the SPI master. If the caller
|
||||
* needs to discard the spi_device without adding it, then it should
|
||||
* call spi_dev_put() on it.
|
||||
*
|
||||
* Returns a pointer to the new device, or NULL.
|
||||
*/
|
||||
struct spi_device *spi_alloc_device(struct spi_master *master)
|
||||
{
|
||||
struct spi_device *spi;
|
||||
struct device *dev = master->dev.parent;
|
||||
|
||||
if (!spi_master_get(master))
|
||||
return NULL;
|
||||
|
||||
spi = kzalloc(sizeof *spi, GFP_KERNEL);
|
||||
if (!spi) {
|
||||
dev_err(dev, "cannot alloc spi_device\n");
|
||||
spi_master_put(master);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
spi->master = master;
|
||||
spi->dev.parent = dev;
|
||||
spi->dev.bus = &spi_bus_type;
|
||||
spi->dev.release = spidev_release;
|
||||
device_initialize(&spi->dev);
|
||||
return spi;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(spi_alloc_device);
|
||||
|
||||
/**
|
||||
* spi_add_device - Add spi_device allocated with spi_alloc_device
|
||||
* @spi: spi_device to register
|
||||
*
|
||||
* Companion function to spi_alloc_device. Devices allocated with
|
||||
* spi_alloc_device can be added onto the spi bus with this function.
|
||||
*
|
||||
* Returns 0 on success; non-zero on failure
|
||||
*/
|
||||
int spi_add_device(struct spi_device *spi)
|
||||
{
|
||||
struct device *dev = spi->master->dev.parent;
|
||||
int status;
|
||||
|
||||
/* Chipselects are numbered 0..max; validate. */
|
||||
if (spi->chip_select >= spi->master->num_chipselect) {
|
||||
dev_err(dev, "cs%d >= max %d\n",
|
||||
spi->chip_select,
|
||||
spi->master->num_chipselect);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Set the bus ID string */
|
||||
snprintf(spi->dev.bus_id, sizeof spi->dev.bus_id,
|
||||
"%s.%u", spi->master->dev.bus_id,
|
||||
spi->chip_select);
|
||||
|
||||
/* drivers may modify this initial i/o setup */
|
||||
status = spi->master->setup(spi);
|
||||
if (status < 0) {
|
||||
dev_err(dev, "can't %s %s, status %d\n",
|
||||
"setup", spi->dev.bus_id, status);
|
||||
return status;
|
||||
}
|
||||
|
||||
/* driver core catches callers that misbehave by defining
|
||||
* devices that already exist.
|
||||
*/
|
||||
status = device_add(&spi->dev);
|
||||
if (status < 0) {
|
||||
dev_err(dev, "can't %s %s, status %d\n",
|
||||
"add", spi->dev.bus_id, status);
|
||||
return status;
|
||||
}
|
||||
|
||||
dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(spi_add_device);
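The kerneldoc above describes the split allocate/add flow that of_register_spi_devices() relies on. A minimal hedged sketch of a board-code style caller; the function name, chip-select, speed and modalias values are invented for illustration:

/* Sketch of the two-step spi_alloc_device()/spi_add_device() pattern. */
static int add_example_spi_child(struct spi_master *master)
{
	struct spi_device *spi = spi_alloc_device(master);

	if (!spi)
		return -ENOMEM;

	/* fill in device parameters before the device becomes visible */
	spi->chip_select = 1;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;
	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));

	if (spi_add_device(spi)) {
		spi_dev_put(spi);	/* discard a device that was never added */
		return -ENODEV;
	}
	return 0;
}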

/**
 * spi_new_device - instantiate one new SPI device
@ -197,7 +287,6 @@ struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	struct device		*dev = master->dev.parent;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
@ -207,66 +296,28 @@ struct spi_device *spi_new_device(struct spi_master *master,
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	/* Chipselects are numbered 0..max; validate. */
	if (chip->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d > max %d\n",
			chip->chip_select,
			master->num_chipselect);
		return NULL;
	}

	if (!spi_master_get(master))
	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy = kzalloc(sizeof *proxy, GFP_KERNEL);
	if (!proxy) {
		dev_err(dev, "can't alloc dev for cs%d\n",
			chip->chip_select);
		goto fail;
	}
	proxy->master = master;
	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));

	snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id,
			"%s.%u", master->dev.bus_id,
			chip->chip_select);
	proxy->dev.parent = dev;
	proxy->dev.bus = &spi_bus_type;
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	proxy->dev.release = spidev_release;

	/* drivers may modify this initial i/o setup */
	status = master->setup(proxy);
	status = spi_add_device(proxy);
	if (status < 0) {
		dev_err(dev, "can't %s %s, status %d\n",
				"setup", proxy->dev.bus_id, status);
		goto fail;
		spi_dev_put(proxy);
		return NULL;
	}

	/* driver core catches callers that misbehave by defining
	 * devices that already exist.
	 */
	status = device_register(&proxy->dev);
	if (status < 0) {
		dev_err(dev, "can't %s %s, status %d\n",
				"add", proxy->dev.bus_id, status);
		goto fail;
	}
	dev_dbg(dev, "registered child %s\n", proxy->dev.bus_id);
	return proxy;

fail:
	spi_master_put(master);
	kfree(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
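
As a hedged illustration (not from the patch): dynamically instantiating a device on an already-registered master with spi_new_device(), using a hypothetical board-info entry.

/* Hypothetical caller of spi_new_device(); field values are illustrative. */
static struct spi_device *example_add_dynamic(struct spi_master *master)
{
	struct spi_board_info chip = {
		.modalias	= "example-chip",
		.max_speed_hz	= 500000,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	};

	return spi_new_device(master, &chip);	/* NULL on any failure */
}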

@ -46,6 +46,8 @@
#define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
#define _PAGE_F_SECOND	_PAGE_SECONDARY
#define _PAGE_F_GIX	_PAGE_GROUP_IX
#define _PAGE_SPECIAL	0x10000 /* software: special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \

@ -70,6 +70,8 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Additional PTE bits (don't change without checking asm in hash_low.S) */
#define __HAVE_ARCH_PTE_SPECIAL
#define _PAGE_SPECIAL	0x00000400 /* software: special page */
#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
@ -401,6 +401,9 @@ extern int icache_44x_need_flush;
|
||||
#ifndef _PAGE_COHERENT
|
||||
#define _PAGE_COHERENT 0
|
||||
#endif
|
||||
#ifndef _PAGE_WRITETHRU
|
||||
#define _PAGE_WRITETHRU 0
|
||||
#endif
|
||||
#ifndef _PMD_PRESENT_MASK
|
||||
#define _PMD_PRESENT_MASK _PMD_PRESENT
|
||||
#endif
|
||||
|

@ -245,7 +245,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE;}
static inline int pte_special(pte_t pte)	{ return 0; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }

static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

@ -265,7 +265,7 @@ static inline pte_t pte_mkyoung(pte_t pte) {
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	return pte; }
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline unsigned long pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
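
A minimal sketch (not part of the patch) of how the new accessors compose: pte_mkspecial() sets the software _PAGE_SPECIAL bit where the page-table format provides one, and pte_special() reports it back; the helper name below is hypothetical.

/* Illustrative only: round-trip the special bit through the new helpers. */
static void example_mark_special(pte_t pte)
{
	pte = pte_mkspecial(pte);	/* sets _PAGE_SPECIAL where the format has it */
	if (pte_special(pte))
		pr_debug("pte has the special bit set\n");
}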

@ -84,6 +84,7 @@ struct pt_regs {
#ifndef __ASSEMBLY__

#define instruction_pointer(regs)	((regs)->nip)
#define user_stack_pointer(regs)	((regs)->gpr[1])
#define regs_return_value(regs)		((regs)->gpr[3])

#ifdef CONFIG_SMP

@ -122,8 +122,7 @@ typedef struct sigaltstack {

#ifdef __KERNEL__
struct pt_regs;
extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */

@ -62,6 +62,8 @@ extern int smp_hw_index[];
#endif

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
extern int cpu_to_core_id(int cpu);

/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 *

84  include/asm-powerpc/syscall.h  Normal file
@ -0,0 +1,84 @@
/*
 * Access to user system call parameters and results
 *
 * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * See asm-generic/syscall.h for descriptions of what we must do here.
 */

#ifndef _ASM_SYSCALL_H
#define _ASM_SYSCALL_H	1

#include <linux/sched.h>

static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
}

static inline void syscall_rollback(struct task_struct *task,
				    struct pt_regs *regs)
{
	regs->gpr[3] = regs->orig_gpr3;
}

static inline long syscall_get_error(struct task_struct *task,
				     struct pt_regs *regs)
{
	return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
}

static inline long syscall_get_return_value(struct task_struct *task,
					    struct pt_regs *regs)
{
	return regs->gpr[3];
}

static inline void syscall_set_return_value(struct task_struct *task,
					    struct pt_regs *regs,
					    int error, long val)
{
	if (error) {
		regs->ccr |= 0x1000L;
		regs->gpr[3] = -error;
	} else {
		regs->ccr &= ~0x1000L;
		regs->gpr[3] = val;
	}
}

static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	BUG_ON(i + n > 6);
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT)) {
		/*
		 * Zero-extend 32-bit argument values.  The high bits are
		 * garbage ignored by the actual syscall dispatch.
		 */
		while (n-- > 0)
			args[n] = (u32) regs->gpr[3 + i + n];
		return;
	}
#endif
	memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
}

static inline void syscall_set_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 const unsigned long *args)
{
	BUG_ON(i + n > 6);
	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
}

#endif	/* _ASM_SYSCALL_H */
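
As a hedged sketch (the helper name below is hypothetical, not part of the patch): how a tracing path that already has the traced task and its pt_regs might combine these accessors.

/* Hypothetical tracer-side helper built on the accessors above. */
static void example_log_syscall(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1L)
		return;				/* task is not inside a system call */

	syscall_get_arguments(task, regs, 0, 6, args);
	pr_debug("syscall %ld(%#lx, %#lx, ...)\n", nr, args[0], args[1]);
}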

@ -108,6 +108,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SECCOMP		10	/* secure computing */
#define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
#define TIF_NOERROR		12	/* Force successful syscall return */
#define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
#define TIF_FREEZE		14	/* Freezing for suspend */
#define TIF_RUNLATCH		15	/* Is the runlatch enabled? */
#define TIF_ABI_PENDING		16	/* 32/64 bit switch needed */

@ -125,12 +126,14 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_RESTOREALL		(1<<TIF_RESTOREALL)
#define _TIF_NOERROR		(1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_FREEZE		(1<<TIF_FREEZE)
#define _TIF_RUNLATCH		(1<<TIF_RUNLATCH)
#define _TIF_ABI_PENDING	(1<<TIF_ABI_PENDING)
#define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)

#define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED)
#define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
				 _TIF_NOTIFY_RESUME)
#define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)

/* Bits in local_flags */
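
For illustration only (this is not the actual exception-return code): with _TIF_NOTIFY_RESUME folded into the user work mask, a return-to-user slow path can hand the pending flags to the new do_signal() prototype; the function name below is hypothetical.

/* Hypothetical sketch of consuming the new work flags before returning to user. */
static void example_resume_userspace(struct pt_regs *regs)
{
	unsigned long flags = current_thread_info()->flags;

	if (flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME))
		do_signal(regs, flags);	/* covers signals and tracehook resume callbacks */
}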

@ -108,6 +108,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#include <asm/smp.h>

#define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
#define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu)		(cpu_to_core_id(cpu))
#endif
#endif

@ -70,5 +70,6 @@ extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
	const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);

#endif /* _LINUX_OF_H */

18  include/linux/of_spi.h  Normal file
@ -0,0 +1,18 @@
/*
 * OpenFirmware SPI support routines
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * Support routines for deriving SPI device attachments from the device
 * tree.
 */

#ifndef __LINUX_OF_SPI_H
#define __LINUX_OF_SPI_H

#include <linux/of.h>
#include <linux/spi/spi.h>

extern void of_register_spi_devices(struct spi_master *master,
				    struct device_node *np);

#endif /* __LINUX_OF_SPI */
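
A hedged sketch (the function name and flow are illustrative, not from the patch): an SPI master driver that has a device-tree node can register the master and then let of_register_spi_devices() create the child spi_device entries described under that node.

/* Hypothetical probe-path fragment using the new OF SPI helper. */
static int example_attach_of_children(struct spi_master *master,
				      struct device_node *np)
{
	int ret = spi_register_master(master);

	if (ret)
		return ret;

	of_register_spi_devices(master, np);	/* instantiate children from the tree */
	return 0;
}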

@ -778,7 +778,19 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
 * use spi_new_device() to describe each device.  You can also call
 * spi_unregister_device() to start making that device vanish, but
 * normally that would be handled by spi_unregister_master().
 *
 * You can also use spi_alloc_device() and spi_add_device() to use a two
 * stage registration sequence for each spi_device. This gives the caller
 * some more control over the spi_device structure before it is registered,
 * but requires the caller to initialize fields that would otherwise
 * be defined using the board info.
 */
extern struct spi_device *
spi_alloc_device(struct spi_master *master);

extern int
spi_add_device(struct spi_device *spi);

extern struct spi_device *
spi_new_device(struct spi_master *, struct spi_board_info *);