
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6: (44 commits)
  sparc: Remove Sparc's asm-offsets for sclow.S
  sparc64: Update defconfig.
  sparc64: Add Niagara2 RNG driver.
  sparc64: Add missing hypervisor service group numbers.
  sparc64: Remove 4MB and 512K base page size options.
  sparc64: Convert to generic helpers for IPI function calls.
  sparc: Use new '%pS' infrastructure to print symbols.
  sparc32: fix init.c allnoconfig build error
  sparc64: Config category "Processor type and features" absent
  sparc: arch/sparc/kernel/apc.c to unlocked_ioctl
  sparc: join the remaining header files
  sparc: merge header files with trivial differences
  sparc: when header files are equal use asm-sparc version
  sparc: copy sparc64 specific files to asm-sparc
  sparc: Merge asm-sparc{,64}/asi.h
  sparc: export openprom.h to userspace
  sparc: Merge asm-sparc{,64}/types.h
  sparc: Merge asm-sparc{,64}/termios.h
  sparc: Merge asm-sparc{,64}/termbits.h
  sparc: Merge asm-sparc{,64}/setup.h
  ...
Linus Torvalds 2008-07-21 09:40:26 -07:00
commit f8b71a3a92
462 changed files with 26909 additions and 27109 deletions


@ -56,7 +56,7 @@ __setup("apc=", apc_setup);
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
void apc_swift_idle(void)
static void apc_swift_idle(void)
{
#ifdef APC_DEBUG_LED
set_auxio(0x00, AUXIO_LED);
@ -85,54 +85,70 @@ static int apc_release(struct inode *inode, struct file *f)
return 0;
}
static int apc_ioctl(struct inode *inode, struct file *f,
unsigned int cmd, unsigned long __arg)
static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
{
__u8 inarg, __user *arg;
arg = (__u8 __user *) __arg;
lock_kernel();
switch (cmd) {
case APCIOCGFANCTL:
if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg))
return -EFAULT;
if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg)) {
unlock_kernel();
return -EFAULT;
}
break;
case APCIOCGCPWR:
if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg))
if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg)) {
unlock_kernel();
return -EFAULT;
}
break;
case APCIOCGBPORT:
if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg))
if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg)) {
unlock_kernel();
return -EFAULT;
}
break;
case APCIOCSFANCTL:
if (get_user(inarg, arg))
if (get_user(inarg, arg)) {
unlock_kernel();
return -EFAULT;
}
apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG);
break;
case APCIOCSCPWR:
if (get_user(inarg, arg))
if (get_user(inarg, arg)) {
unlock_kernel();
return -EFAULT;
}
apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG);
break;
case APCIOCSBPORT:
if (get_user(inarg, arg))
if (get_user(inarg, arg)) {
unlock_kernel();
return -EFAULT;
}
apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG);
break;
default:
unlock_kernel();
return -EINVAL;
};
unlock_kernel();
return 0;
}
static const struct file_operations apc_fops = {
.ioctl = apc_ioctl,
.open = apc_open,
.release = apc_release,
.unlocked_ioctl = apc_ioctl,
.open = apc_open,
.release = apc_release,
};
static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
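Editor's note: the hunk above is the "arch/sparc/kernel/apc.c to unlocked_ioctl" change from the shortlog. The handler drops the inode argument, returns long, and now takes the big kernel lock itself (lock_kernel()/unlock_kernel() from <linux/smp_lock.h>), because .unlocked_ioctl is entered without the BKL held; that is why every early return in the hunk gains its own unlock_kernel(). Below is a minimal sketch of the same conversion using a single exit path instead, reusing the APC register helpers and ioctl constants visible above; it is illustrative only, not what the commit applies.

/* Sketch, not from the commit: the same .ioctl -> .unlocked_ioctl
 * move, but funnelling every case through one unlock so no early
 * return can leave the BKL held.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>

static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
{
	__u8 __user *arg = (__u8 __user *) __arg;
	long err = 0;

	lock_kernel();
	switch (cmd) {
	case APCIOCGFANCTL:
		if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg))
			err = -EFAULT;
		break;
	/* ... the remaining APCIOC* cases follow the same shape ... */
	default:
		err = -EINVAL;
		break;
	}
	unlock_kernel();
	return err;
}

Either shape is correct; the single unlock path just makes the lock pairing easier to audit as more cases are added.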


@ -18,18 +18,6 @@ int foo(void)
{
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
/* XXX This is the stuff for sclow.S, kill it. */
DEFINE(AOFF_task_pid, offsetof(struct task_struct, pid));
DEFINE(AOFF_task_uid, offsetof(struct task_struct, uid));
DEFINE(AOFF_task_gid, offsetof(struct task_struct, gid));
DEFINE(AOFF_task_euid, offsetof(struct task_struct, euid));
DEFINE(AOFF_task_egid, offsetof(struct task_struct, egid));
/* DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); */
DEFINE(ASIZ_task_uid, sizeof(current->uid));
DEFINE(ASIZ_task_gid, sizeof(current->gid));
DEFINE(ASIZ_task_euid, sizeof(current->euid));
DEFINE(ASIZ_task_egid, sizeof(current->egid));
BLANK();
DEFINE(AOFF_thread_fork_kpsr,
offsetof(struct thread_struct, fork_kpsr));
BLANK();


@ -69,7 +69,7 @@ static inline unsigned long ebus_alloc(size_t size)
/*
*/
int __init ebus_blacklist_irq(const char *name)
static int __init ebus_blacklist_irq(const char *name)
{
struct ebus_device_irq *dp;
@ -83,8 +83,8 @@ int __init ebus_blacklist_irq(const char *name)
return 0;
}
void __init fill_ebus_child(struct device_node *dp,
struct linux_ebus_child *dev)
static void __init fill_ebus_child(struct device_node *dp,
struct linux_ebus_child *dev)
{
const int *regs;
const int *irqs;
@ -144,7 +144,8 @@ void __init fill_ebus_child(struct device_node *dp,
}
}
void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev)
static void __init fill_ebus_device(struct device_node *dp,
struct linux_ebus_device *dev)
{
const struct linux_prom_registers *regs;
struct linux_ebus_child *child;


@ -19,6 +19,7 @@
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
#else
@ -1317,7 +1318,6 @@ linux_sparc_syscall:
bne linux_fast_syscall
/* Just do first insn from SAVE_ALL in the delay slot */
.globl syscall_is_too_hard
syscall_is_too_hard:
SAVE_ALL_HEAD
rd %wim, %l3
@ -1544,8 +1544,7 @@ kgdb_trap_low:
#endif
.align 4
.globl __handle_exception, flush_patch_exception
__handle_exception:
.globl flush_patch_exception
flush_patch_exception:
FLUSH_ALL_KERNEL_WINDOWS;
ldd [%o0], %o6


@ -228,7 +228,6 @@ tsetup_mmu_patchme:
*/
#define glob_tmp g1
.globl tsetup_sun4c_stackchk
tsetup_sun4c_stackchk:
/* Done by caller: andcc %sp, 0x7, %g0 */
bne trap_setup_user_stack_is_bolixed


@ -32,7 +32,6 @@
*/
.align 4
.globl cputyp
cputyp:
.word 1
@ -1280,7 +1279,6 @@ halt_me:
* gets initialized in c-code so all routines can use it.
*/
.globl prom_vector_p
prom_vector_p:
.word 0


@ -24,7 +24,7 @@ static struct idprom idprom_buffer;
* of the Sparc CPU and have a meaningful IDPROM machtype value that we
* know about. See asm-sparc/machines.h for empirical constants.
*/
struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
/* First, Sun4's */
{ "Sun 4/100 Series", (SM_SUN4 | SM_4_110) },
{ "Sun 4/200 Series", (SM_SUN4 | SM_4_260) },


@ -49,13 +49,16 @@
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
struct resource *_sparc_find_resource(struct resource *r, unsigned long);
static struct resource *_sparc_find_resource(struct resource *r,
unsigned long);
static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);
static void register_proc_sparc_ioport(void);
/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
@ -539,8 +542,6 @@ void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp)
int __init sbus_arch_preinit(void)
{
extern void register_proc_sparc_ioport(void);
register_proc_sparc_ioport();
#ifdef CONFIG_SUN4
@ -853,8 +854,8 @@ _sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
* XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
* This probably warrants some sort of hashing.
*/
struct resource *
_sparc_find_resource(struct resource *root, unsigned long hit)
static struct resource *_sparc_find_resource(struct resource *root,
unsigned long hit)
{
struct resource *tmp;
@ -865,7 +866,7 @@ _sparc_find_resource(struct resource *root, unsigned long hit)
return NULL;
}
void register_proc_sparc_ioport(void)
static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap);


@ -154,7 +154,7 @@ void (*sparc_init_timers)(irq_handler_t ) =
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;
struct {
static struct {
struct irqaction *action;
int flags;
} sparc_irq[NR_IRQS];


@ -1,6 +1,6 @@
/* linux/arch/sparc/kernel/process.c
*
* Copyright (C) 1995 David S. Miller (davem@davemloft.net)
* Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
@ -177,6 +176,8 @@ void machine_power_off(void)
machine_halt();
}
#if 0
static DEFINE_SPINLOCK(sparc_backtrace_lock);
void __show_backtrace(unsigned long fp)
@ -196,7 +197,7 @@ void __show_backtrace(unsigned long fp)
rw->ins[4], rw->ins[5],
rw->ins[6],
rw->ins[7]);
print_symbol("%s\n", rw->ins[7]);
printk("%pS\n", (void *) rw->ins[7]);
rw = (struct reg_window *) rw->ins[6];
}
spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
@ -228,7 +229,6 @@ void smp_show_backtrace_all_cpus(void)
}
#endif
#if 0
void show_stackframe(struct sparc_stackf *sf)
{
unsigned long size;
@ -264,14 +264,14 @@ void show_regs(struct pt_regs *r)
printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
r->psr, r->pc, r->npc, r->y, print_tainted());
print_symbol("PC: <%s>\n", r->pc);
printk("PC: <%pS>\n", (void *) r->pc);
printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
print_symbol("RPC: <%s>\n", r->u_regs[15]);
printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
@ -306,7 +306,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
rw = (struct reg_window *) fp;
pc = rw->ins[7];
printk("[%08lx : ", pc);
print_symbol("%s ] ", pc);
printk("%pS ] ", (void *) pc);
fp = rw->ins[6];
} while (++count < 16);
printk("\n");
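Editor's note: this file, and several sparc64 files later in the diff, replaces print_symbol() with the %pS printk() conversion, which resolves the address to a symbol name inside vsnprintf(); that is also why <linux/kallsyms.h> disappears from the include lists. A small illustrative helper (hypothetical function, not part of the commit):

/* Illustrative only: report a program counter numerically and
 * symbolically via the %pS extension. */
#include <linux/kernel.h>

static void report_pc(unsigned long pc)
{
	/* With CONFIG_KALLSYMS enabled, %pS prints something like
	 * "apc_swift_idle+0x10/0x28"; without it, it falls back to
	 * the raw address, so the call site needs no #ifdef. */
	printk(KERN_INFO "PC: [%08lx] <%pS>\n", pc, (void *) pc);
}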


@ -224,8 +224,6 @@ ret_trap_user_stack_is_bolixed:
b signal_p
ld [%curptr + TI_FLAGS], %g2
.globl sun4c_rett_stackchk
sun4c_rett_stackchk:
be 1f
and %fp, 0xfff, %g1 ! delay slot


@ -67,7 +67,7 @@ struct screen_info screen_info = {
extern unsigned long trapbase;
/* Pretty sick eh? */
void prom_sync_me(void)
static void prom_sync_me(void)
{
unsigned long prom_tbr, flags;
@ -97,7 +97,7 @@ void prom_sync_me(void)
return;
}
unsigned int boot_flags __initdata = 0;
static unsigned int boot_flags __initdata = 0;
#define BOOTME_DEBUG 0x1
/* Exported for mm/init.c:paging_init. */


@ -35,13 +35,9 @@
#include "irq.h"
int smp_num_cpus = 1;
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
@ -55,9 +51,6 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
* instruction which is much better...
*/
/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;
void __cpuinit smp_store_cpu_info(int id)
{
int cpu_node;


@ -68,7 +68,8 @@ unsigned char *interrupt_enable = NULL;
static int sun4c_pil_map[] = { 0, 1, 2, 3, 5, 7, 8, 9 };
unsigned int sun4c_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
static unsigned int sun4c_sbint_to_irq(struct sbus_dev *sdev,
unsigned int sbint)
{
if (sbint >= sizeof(sun4c_pil_map)) {
printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);


@ -52,13 +52,13 @@ extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
unsigned char cpu_leds[32];
#ifdef CONFIG_SMP
unsigned char sbus_tid[32];
static unsigned char sbus_tid[32];
#endif
static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;
struct sbus_action {
static struct sbus_action {
struct irqaction *action;
/* For SMP this needs to be extended */
} *sbus_actions;
@ -267,7 +267,8 @@ unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq)
return irq;
}
unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
static unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev,
unsigned int sbint)
{
if (sbint >= sizeof(sbus_to_pil)) {
printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);


@ -154,7 +154,8 @@ static unsigned long irq_mask[] = {
static int sun4m_pil_map[] = { 0, 2, 3, 5, 7, 9, 11, 13 };
unsigned int sun4m_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
static unsigned int sun4m_sbint_to_irq(struct sbus_dev *sdev,
unsigned int sbint)
{
if (sbint >= sizeof(sun4m_pil_map)) {
printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint);
@ -163,7 +164,7 @@ unsigned int sun4m_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint)
return sun4m_pil_map[sbint] | 0x30;
}
inline unsigned long sun4m_get_irqmask(unsigned int irq)
static unsigned long sun4m_get_irqmask(unsigned int irq)
{
unsigned long mask;
@ -281,7 +282,7 @@ static void sun4m_set_udt(int cpu)
#define TIMER_IRQ (OBIO_INTR | 10)
#define PROFILE_IRQ (OBIO_INTR | 14)
struct sun4m_timer_regs *sun4m_timers;
static struct sun4m_timer_regs *sun4m_timers;
unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
static void sun4m_clear_clock_irq(void)


@ -244,8 +244,9 @@ static struct smp_funcall {
static DEFINE_SPINLOCK(cross_call_lock);
/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
static void smp4m_cross_call(smpfunc_t func, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
register int ncpus = SUN4M_NCPUS;
unsigned long flags;
@ -344,7 +345,7 @@ static void __init smp_setup_percpu_timer(void)
enable_pil_irq(14);
}
void __init smp4m_blackbox_id(unsigned *addr)
static void __init smp4m_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
@ -354,7 +355,7 @@ void __init smp4m_blackbox_id(unsigned *addr)
addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
}
void __init smp4m_blackbox_current(unsigned *addr)
static void __init smp4m_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;


@ -46,7 +46,7 @@
#include "irq.h"
DEFINE_SPINLOCK(rtc_lock);
enum sparc_clock_type sp_clock_typ;
static enum sparc_clock_type sp_clock_typ;
DEFINE_SPINLOCK(mostek_lock);
void __iomem *mstk48t02_regs = NULL;
static struct mostek48t08 __iomem *mstk48t08_regs = NULL;
@ -366,7 +366,7 @@ static int __init clock_init(void)
fs_initcall(clock_init);
#endif /* !CONFIG_SUN4 */
void __init sbus_time_init(void)
static void __init sbus_time_init(void)
{
BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);


@ -1,7 +1,7 @@
/*
* arch/sparc/kernel/traps.c
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1995, 2008 David S. Miller (davem@davemloft.net)
* Copyright 2000 Jakub Jelinek (jakub@redhat.com)
*/
@ -11,7 +11,6 @@
#include <linux/sched.h> /* for jiffies */
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
@ -33,9 +32,6 @@ struct trap_trace_entry {
unsigned long type;
};
int trap_curbuf = 0;
struct trap_trace_entry trapbuf[1024];
void syscall_trace_entry(struct pt_regs *regs)
{
printk("%s[%d]: ", current->comm, task_pid_nr(current));
@ -72,7 +68,7 @@ void sun4d_nmi(struct pt_regs *regs)
prom_halt();
}
void instruction_dump (unsigned long *pc)
static void instruction_dump(unsigned long *pc)
{
int i;
@ -119,8 +115,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
count++ < 30 &&
(((unsigned long) rw) >= PAGE_OFFSET) &&
!(((unsigned long) rw) & 0x7)) {
printk("Caller[%08lx]", rw->ins[7]);
print_symbol(": %s\n", rw->ins[7]);
printk("Caller[%08lx]: %pS\n", rw->ins[7],
(void *) rw->ins[7]);
rw = (struct reg_window *)rw->ins[6];
}
}
@ -479,10 +475,6 @@ void do_BUG(const char *file, int line)
extern void sparc_cpu_startup(void);
int linux_smp_still_initting;
unsigned int thiscpus_tbr;
int thiscpus_mid;
void trap_init(void)
{
extern void thread_info_offsets_are_bolixed_pete(void);


@ -306,7 +306,6 @@ spwin_bad_ustack_from_kernel:
* As noted above %curptr cannot be touched by this routine at all.
*/
.globl spwin_sun4c_stackchk
spwin_sun4c_stackchk:
/* LOCATION: Window to be saved on the stack */


@ -243,7 +243,6 @@ fwin_user_finish_up:
*/
.align 4
.globl sun4c_fwin_stackchk
sun4c_fwin_stackchk:
/* LOCATION: Window 'W' */


@ -451,7 +451,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
}
/* This always deals with user addresses. */
inline void force_user_fault(unsigned long address, int write)
static void force_user_fault(unsigned long address, int write)
{
struct vm_area_struct *vma;
struct task_struct *tsk = current;


@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <asm/system.h>
#include <asm/vac-ops.h>
@ -128,7 +129,7 @@ unsigned long calc_highpages(void)
return nr;
}
unsigned long calc_max_low_pfn(void)
static unsigned long calc_max_low_pfn(void)
{
int i;
unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
@ -292,7 +293,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
*
* We simply copy the 2.4 implementation for now.
*/
int pgt_cache_water[2] = { 25, 50 };
static int pgt_cache_water[2] = { 25, 50 };
void check_pgt_cache(void)
{
@ -356,8 +357,6 @@ void __init paging_init(void)
device_scan();
}
struct cache_palias *sparc_aliases;
static void __init taint_real_pages(void)
{
int i;
@ -375,7 +374,7 @@ static void __init taint_real_pages(void)
}
}
void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long tmp;


@ -50,7 +50,7 @@
#include <asm/btfixup.h>
enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;
@ -60,7 +60,7 @@ extern unsigned long last_valid_pfn;
extern unsigned long page_kernel;
pgd_t *srmmu_swapper_pg_dir;
static pgd_t *srmmu_swapper_pg_dir;
#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
@ -83,12 +83,12 @@ BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
char *srmmu_name;
ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;
static ctxd_t *srmmu_context_table;
int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);
int is_hypersparc;
static int is_hypersparc;
/*
* In general all page table modifications should use the V8 atomic
@ -112,11 +112,11 @@ static inline int srmmu_device_memory(unsigned long x)
return ((x & 0xF0000000) != 0);
}
int srmmu_cache_pagetables;
static int srmmu_cache_pagetables;
/* these will be initialized in srmmu_nocache_calcsize() */
unsigned long srmmu_nocache_size;
unsigned long srmmu_nocache_end;
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;
/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
@ -324,7 +324,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
unsigned inline long srmmu_get_nocache(int size, int align)
static unsigned long srmmu_get_nocache(int size, int align)
{
unsigned long tmp;
@ -336,7 +336,7 @@ unsigned inline long srmmu_get_nocache(int size, int align)
return tmp;
}
void srmmu_free_nocache(unsigned long vaddr, int size)
static void srmmu_free_nocache(unsigned long vaddr, int size)
{
int offset;
@ -369,7 +369,8 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
bit_map_clear(&srmmu_nocache_map, offset, size);
}
void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end);
extern unsigned long probe_memory(void); /* in fault.c */
@ -377,7 +378,7 @@ extern unsigned long probe_memory(void); /* in fault.c */
* Reserve nocache dynamically proportionally to the amount of
* system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
*/
void srmmu_nocache_calcsize(void)
static void srmmu_nocache_calcsize(void)
{
unsigned long sysmemavail = probe_memory() / 1024;
int srmmu_nocache_npages;
@ -398,7 +399,7 @@ void srmmu_nocache_calcsize(void)
srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
void __init srmmu_nocache_init(void)
static void __init srmmu_nocache_init(void)
{
unsigned int bitmap_bits;
pgd_t *pgd;
@ -645,7 +646,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
* mappings on the kernel stack without any special code as we did
* need on the sun4c.
*/
struct thread_info *srmmu_alloc_thread_info(void)
static struct thread_info *srmmu_alloc_thread_info(void)
{
struct thread_info *ret;
@ -1045,13 +1046,14 @@ extern void hypersparc_setup_blockops(void);
* around 8mb mapped for us.
*/
void __init early_pgtable_allocfail(char *type)
static void __init early_pgtable_allocfail(char *type)
{
prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
prom_halt();
}
void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
@ -1081,7 +1083,8 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
}
}
void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
@ -1116,7 +1119,8 @@ void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long en
* looking at the prom's page table directly which is what most
* other OS's do. Yuck... this is much better.
*/
void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
static void __init srmmu_inherit_prom_mappings(unsigned long start,
unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;


@ -93,7 +93,6 @@ tsunami_flush_tlb_page_out:
ldd [src + offset + 0x00], t2; \
std t2, [dst + offset + 0x00];
.globl tsunami_copy_1page
tsunami_copy_1page:
/* NOTE: This routine has to be shorter than 70insns --jj */
or %g0, (PAGE_SIZE >> 8), %g1


@ -16,6 +16,7 @@ config SPARC64
select HAVE_IDE
select HAVE_LMB
select HAVE_ARCH_KGDB
select USE_GENERIC_SMP_HELPERS if SMP
config GENERIC_TIME
bool
@ -81,6 +82,10 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
bool
def_bool y
source "init/Kconfig"
menu "Processor type and features"
choice
prompt "Kernel page size"
default SPARC64_PAGE_SIZE_8KB
@ -93,19 +98,11 @@ config SPARC64_PAGE_SIZE_8KB
8KB and 64KB work quite well, since SPARC ELF sections
provide for up to 64KB alignment.
Therefore, 512KB and 4MB are for expert hackers only.
If you don't know what to do, choose 8KB.
config SPARC64_PAGE_SIZE_64KB
bool "64KB"
config SPARC64_PAGE_SIZE_512KB
bool "512KB"
config SPARC64_PAGE_SIZE_4MB
bool "4MB"
endchoice
config SECCOMP
@ -136,14 +133,10 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
source "init/Kconfig"
config GENERIC_HARDIRQS
bool
default y
menu "General machine setup"
source "kernel/time/Kconfig"
config SMP
@ -225,11 +218,10 @@ config HUGETLB_PAGE_SIZE_4MB
bool "4MB"
config HUGETLB_PAGE_SIZE_512K
depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
bool "512K"
config HUGETLB_PAGE_SIZE_64K
depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64KB
depends on !SPARC64_PAGE_SIZE_64KB
bool "64K"
endchoice


@ -9,7 +9,9 @@
CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -m64
CPPFLAGS_vmlinux.lds += -Usparc
# Undefine sparc when processing vmlinux.lds - it is used
# And teach CPP we are doing 64 bit builds (for this case)
CPPFLAGS_vmlinux.lds += -m64 -Usparc
LDFLAGS := -m elf64_sparc


@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.26-rc2
# Fri May 16 13:36:07 2008
# Linux kernel version: 2.6.26
# Fri Jul 18 00:47:07 2008
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@ -22,18 +22,6 @@ CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_SPARC64_PAGE_SIZE_8KB=y
# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
# CONFIG_SPARC64_PAGE_SIZE_512KB is not set
# CONFIG_SPARC64_PAGE_SIZE_4MB is not set
CONFIG_SECCOMP=y
CONFIG_HZ_100=y
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=100
# CONFIG_SCHED_HRTICK is not set
CONFIG_HOTPLUG_CPU=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@ -105,6 +93,7 @@ CONFIG_KRETPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
# CONFIG_HAVE_DMA_ATTRS is not set
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@ -121,6 +110,7 @@ CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
CONFIG_BLOCK_COMPAT=y
#
@ -136,11 +126,21 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
CONFIG_CLASSIC_RCU=y
CONFIG_GENERIC_HARDIRQS=y
#
# General machine setup
# Processor type and features
#
CONFIG_SPARC64_PAGE_SIZE_8KB=y
# CONFIG_SPARC64_PAGE_SIZE_64KB is not set
CONFIG_SECCOMP=y
CONFIG_HZ_100=y
# CONFIG_HZ_250 is not set
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=100
# CONFIG_SCHED_HRTICK is not set
CONFIG_HOTPLUG_CPU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@ -342,6 +342,8 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_FW_LOADER=y
CONFIG_FIRMWARE_IN_KERNEL=y
CONFIG_EXTRA_FIRMWARE=""
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
@ -366,6 +368,7 @@ CONFIG_CDROM_PKTCDVD_BUFFERS=8
CONFIG_CDROM_PKTCDVD_WCACHE=y
CONFIG_ATA_OVER_ETH=m
CONFIG_SUNVDC=m
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
# CONFIG_EEPROM_93CX6 is not set
@ -379,6 +382,7 @@ CONFIG_BLK_DEV_IDE=y
#
# Please see Documentation/ide/ide.txt for help/info on IDE drives
#
CONFIG_IDE_TIMINGS=y
# CONFIG_BLK_DEV_IDE_SATA is not set
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
@ -429,8 +433,6 @@ CONFIG_BLK_DEV_ALI15X3=y
# CONFIG_BLK_DEV_VIA82CXXX is not set
# CONFIG_BLK_DEV_TC86C001 is not set
CONFIG_BLK_DEV_IDEDMA=y
# CONFIG_BLK_DEV_HD_ONLY is not set
# CONFIG_BLK_DEV_HD is not set
#
# SCSI device support
@ -504,6 +506,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_SUNESP is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_DH is not set
# CONFIG_ATA is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
@ -529,6 +532,10 @@ CONFIG_DM_ZERO=m
#
# IEEE 1394 (FireWire) support
#
#
# Enable only one of the two stacks, unless you know what you are doing
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
@ -745,7 +752,8 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
CONFIG_HW_RANDOM=m
CONFIG_HW_RANDOM_N2RNG=m
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
@ -759,38 +767,58 @@ CONFIG_I2C_ALGOBIT=y
#
# I2C Hardware Bus support
#
#
# PC SMBus host controller drivers
#
# CONFIG_I2C_ALI1535 is not set
# CONFIG_I2C_ALI1563 is not set
# CONFIG_I2C_ALI15X3 is not set
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
# CONFIG_I2C_I801 is not set
# CONFIG_I2C_I810 is not set
# CONFIG_I2C_ISCH is not set
# CONFIG_I2C_PIIX4 is not set
# CONFIG_I2C_NFORCE2 is not set
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_PROSAVAGE is not set
# CONFIG_I2C_SAVAGE4 is not set
# CONFIG_I2C_SIMTEC is not set
# CONFIG_I2C_SIS5595 is not set
# CONFIG_I2C_SIS630 is not set
# CONFIG_I2C_SIS96X is not set
# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_STUB is not set
# CONFIG_I2C_TINY_USB is not set
# CONFIG_I2C_VIA is not set
# CONFIG_I2C_VIAPRO is not set
#
# I2C system bus drivers (mostly embedded / system-on-chip)
#
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
#
# External I2C/SMBus adapter drivers
#
# CONFIG_I2C_PARPORT_LIGHT is not set
# CONFIG_I2C_TAOS_EVM is not set
# CONFIG_I2C_TINY_USB is not set
#
# Graphics adapter I2C/DDC channel drivers
#
# CONFIG_I2C_VOODOO3 is not set
#
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_DS1682 is not set
# CONFIG_AT24 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
@ -856,6 +884,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
#
@ -985,15 +1014,7 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_LOGO_SUN_CLUT224=y
#
# Sound
#
CONFIG_SOUND=m
#
# Advanced Linux Sound Architecture
#
CONFIG_SND=m
CONFIG_SND_TIMER=m
CONFIG_SND_PCM=m
@ -1010,21 +1031,17 @@ CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
#
# Generic devices
#
CONFIG_SND_VMASTER=y
CONFIG_SND_MPU401_UART=m
CONFIG_SND_AC97_CODEC=m
CONFIG_SND_DRIVERS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
# CONFIG_SND_SERIAL_U16550 is not set
# CONFIG_SND_MPU401 is not set
#
# PCI devices
#
# CONFIG_SND_AC97_POWER_SAVE is not set
CONFIG_SND_PCI=y
# CONFIG_SND_AD1889 is not set
# CONFIG_SND_ALS300 is not set
CONFIG_SND_ALI5451=m
@ -1084,37 +1101,14 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
# CONFIG_SND_YMFPCI is not set
# CONFIG_SND_AC97_POWER_SAVE is not set
#
# USB devices
#
CONFIG_SND_USB=y
# CONFIG_SND_USB_AUDIO is not set
# CONFIG_SND_USB_CAIAQ is not set
#
# ALSA Sparc devices
#
CONFIG_SND_SPARC=y
# CONFIG_SND_SUN_AMD7930 is not set
CONFIG_SND_SUN_CS4231=m
# CONFIG_SND_SUN_DBRI is not set
#
# System on Chip audio support
#
# CONFIG_SND_SOC is not set
#
# ALSA SoC audio for Freescale SOCs
#
#
# SoC Audio for the Texas Instruments OMAP
#
#
# Open Sound System
#
# CONFIG_SOUND_PRIME is not set
CONFIG_AC97_BUS=m
CONFIG_HID_SUPPORT=y
@ -1167,6 +1161,7 @@ CONFIG_USB_UHCI_HCD=m
#
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_WDM is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
@ -1226,6 +1221,7 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
@ -1420,6 +1416,12 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_HAVE_FTRACE=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
# CONFIG_FTRACE is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
@ -1486,6 +1488,10 @@ CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
# CONFIG_CRYPTO_RMD256 is not set
# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
@ -1527,6 +1533,7 @@ CONFIG_BITREVERSE=y
# CONFIG_GENERIC_FIND_FIRST_BIT is not set
CONFIG_CRC_CCITT=m
CONFIG_CRC16=m
# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set


@ -16,8 +16,8 @@
#include <asm/fhc.h>
#include <asm/starfire.h>
struct linux_central *central_bus = NULL;
struct linux_fhc *fhc_list = NULL;
static struct linux_central *central_bus = NULL;
static struct linux_fhc *fhc_list = NULL;
#define IS_CENTRAL_FHC(__fhc) ((__fhc) == central_bus->child)
@ -79,9 +79,9 @@ static void adjust_regs(struct linux_prom_registers *regp, int nregs,
}
/* Apply probed fhc ranges to registers passed, if no ranges return. */
void apply_fhc_ranges(struct linux_fhc *fhc,
struct linux_prom_registers *regs,
int nregs)
static void apply_fhc_ranges(struct linux_fhc *fhc,
struct linux_prom_registers *regs,
int nregs)
{
if (fhc->num_fhc_ranges)
adjust_regs(regs, nregs, fhc->fhc_ranges,
@ -89,8 +89,8 @@ void apply_fhc_ranges(struct linux_fhc *fhc,
}
/* Apply probed central ranges to registers passed, if no ranges return. */
void apply_central_ranges(struct linux_central *central,
struct linux_prom_registers *regs, int nregs)
static void apply_central_ranges(struct linux_central *central,
struct linux_prom_registers *regs, int nregs)
{
if (central->num_central_ranges)
adjust_regs(regs, nregs, central->central_ranges,


@ -159,7 +159,7 @@ static void ds_var_data(struct ds_info *dp,
struct ds_cap_state *cp,
void *buf, int len);
struct ds_cap_state ds_states_template[] = {
static struct ds_cap_state ds_states_template[] = {
{
.service_id = "md-update",
.data = md_update_data,


@ -34,8 +34,12 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_LDOM, },
{ .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
{ .group = HV_GRP_RNG, },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
{ .group = HV_GRP_NIU, },
{ .group = HV_GRP_VF_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
};


@ -120,9 +120,9 @@ static struct irq_chip msi_irq = {
/* XXX affinity XXX */
};
int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
@ -179,8 +179,8 @@ out_err:
return err;
}
void sparc64_teardown_msi_irq(unsigned int virt_irq,
struct pci_dev *pdev)
static void sparc64_teardown_msi_irq(unsigned int virt_irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;


@ -531,7 +531,7 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
/* Nothing to do... */
}
const struct dma_ops sun4v_dma_ops = {
static const struct dma_ops sun4v_dma_ops = {
.alloc_coherent = dma_4v_alloc_coherent,
.free_coherent = dma_4v_free_coherent,
.map_single = dma_4v_map_single,


@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
@ -211,7 +210,7 @@ static void show_regwindow(struct pt_regs *regs)
printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
if (regs->tstate & TSTATE_PRIV)
print_symbol("I7: <%s>\n", rwk->ins[7]);
printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}
#ifdef CONFIG_SMP
@ -232,7 +231,7 @@ void __show_regs(struct pt_regs * regs)
#endif
printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
regs->tpc, regs->tnpc, regs->y, print_tainted());
print_symbol("TPC: <%s>\n", regs->tpc);
printk("TPC: <%pS>\n", (void *) regs->tpc);
printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
regs->u_regs[3]);
@ -245,7 +244,7 @@ void __show_regs(struct pt_regs * regs)
printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
regs->u_regs[15]);
print_symbol("RPC: <%s>\n", regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
#ifdef CONFIG_SMP
spin_unlock(&regdump_lock);
@ -346,9 +345,6 @@ static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
struct thread_info *tp = current_thread_info();
struct pt_regs *regs = get_irq_regs();
#ifdef CONFIG_KALLSYMS
char buffer[KSYM_SYMBOL_LEN];
#endif
unsigned long flags;
int this_cpu, cpu;
@ -377,17 +373,13 @@ static void sysrq_handle_globreg(int key, struct tty_struct *tty)
gp->tstate, gp->tpc, gp->tnpc,
((tp && tp->task) ? tp->task->comm : "NULL"),
((tp && tp->task) ? tp->task->pid : -1));
#ifdef CONFIG_KALLSYMS
if (gp->tstate & TSTATE_PRIV) {
sprint_symbol(buffer, gp->tpc);
printk(" TPC[%s] ", buffer);
sprint_symbol(buffer, gp->o7);
printk("O7[%s] ", buffer);
sprint_symbol(buffer, gp->i7);
printk("I7[%s]\n", buffer);
} else
#endif
{
printk(" TPC[%pS] O7[%pS] I7[%pS]\n",
(void *) gp->tpc,
(void *) gp->o7,
(void *) gp->i7);
} else {
printk(" TPC[%lx] O7[%lx] I7[%lx]\n",
gp->tpc, gp->o7, gp->i7);
}


@ -788,89 +788,36 @@ static void smp_start_sync_tick_client(int cpu)
0, 0, 0, mask);
}
extern unsigned long xcall_call_function;
void arch_send_call_function_ipi(cpumask_t mask)
{
smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
}
extern unsigned long xcall_call_function_single;
void arch_send_call_function_single_ipi(int cpu)
{
cpumask_t mask = cpumask_of_cpu(cpu);
smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
}
/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
struct call_data_struct {
void (*func) (void *info);
void *info;
atomic_t finished;
int wait;
};
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
/**
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed on other CPUs.
*
* Returns 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <<func>> or are or have executed.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info,
int wait, cpumask_t mask)
{
struct call_data_struct data;
int cpus;
/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());
data.func = func;
data.info = info;
atomic_set(&data.finished, 0);
data.wait = wait;
spin_lock(&call_lock);
cpu_clear(smp_processor_id(), mask);
cpus = cpus_weight(mask);
if (!cpus)
goto out_unlock;
call_data = &data;
mb();
smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
/* Wait for response */
while (atomic_read(&data.finished) != cpus)
cpu_relax();
out_unlock:
spin_unlock(&call_lock);
return 0;
}
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map);
}
void smp_call_function_client(int irq, struct pt_regs *regs)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
clear_softint(1 << irq);
if (call_data->wait) {
/* let initiator proceed only after completion */
func(info);
atomic_inc(&call_data->finished);
} else {
/* let initiator proceed after getting data */
atomic_inc(&call_data->finished);
func(info);
}
generic_smp_call_function_interrupt();
}
void smp_call_function_single_client(int irq, struct pt_regs *regs)
{
clear_softint(1 << irq);
generic_smp_call_function_single_interrupt();
}
static void tsb_sync(void *info)
@ -890,7 +837,7 @@ static void tsb_sync(void *info)
void smp_tsb_sync(struct mm_struct *mm)
{
sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask);
smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
}
extern unsigned long xcall_flush_tlb_mm;
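Editor's note: this is the "Convert to generic helpers for IPI function calls" change. Once USE_GENERIC_SMP_HELPERS is selected (see the sparc64 Kconfig hunk earlier in the diff), kernel/smp.c owns the call_data bookkeeping that the deleted block implemented by hand; the architecture only has to send the IPIs via arch_send_call_function_ipi()/arch_send_call_function_single_ipi() and invoke the two generic interrupt handlers from its IPI traps, as the remaining code above does. Callers then use the generic API directly, as the smp_tsb_sync() hunk shows. A hedged usage sketch with placeholder names, assuming the smp_call_function_mask() signature of this kernel cycle:

/* Sketch only: run a function on a set of CPUs through the generic
 * helpers this commit switches to. */
#include <linux/sched.h>
#include <linux/smp.h>

static void sync_one_cpu(void *info)
{
	/* Runs on each targeted CPU in IPI (hardirq) context. */
	struct mm_struct *mm = info;
	(void) mm;
}

static void sync_mm_users(struct mm_struct *mm)
{
	/* Run sync_one_cpu() on every CPU using this mm and wait for
	 * completion, mirroring the new smp_tsb_sync() above. */
	smp_call_function_mask(mm->cpu_vm_mask, sync_one_cpu, mm, 1);
}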


@ -108,8 +108,6 @@ EXPORT_SYMBOL(__read_unlock);
EXPORT_SYMBOL(__write_lock);
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_SMP */
#ifdef CONFIG_MCOUNT


@ -542,7 +542,7 @@ asmlinkage long sparc64_personality(unsigned long personality)
return ret;
}
int sparc64_mmap_check(unsigned long addr, unsigned long len)
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
if (test_thread_flag(TIF_32BIT)) {
if (len >= STACK_TOP32)
@ -614,9 +614,9 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
goto out;
if (unlikely(new_len >= VA_EXCLUDE_START))
goto out;
if (unlikely(sparc64_mmap_check(addr, old_len)))
if (unlikely(sparc_mmap_check(addr, old_len)))
goto out;
if (unlikely(sparc64_mmap_check(new_addr, new_len)))
if (unlikely(sparc_mmap_check(new_addr, new_len)))
goto out;
down_write(&current->mm->mmap_sem);


@ -359,7 +359,8 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
return err;
}
int cp_compat_stat64(struct kstat *stat, struct compat_stat64 __user *statbuf)
static int cp_compat_stat64(struct kstat *stat,
struct compat_stat64 __user *statbuf)
{
int err;
@ -870,9 +871,9 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr,
unsigned long ret = -EINVAL;
unsigned long new_addr = __new_addr;
if (unlikely(sparc64_mmap_check(addr, old_len)))
if (unlikely(sparc_mmap_check(addr, old_len)))
goto out;
if (unlikely(sparc64_mmap_check(new_addr, new_len)))
if (unlikely(sparc_mmap_check(new_addr, new_len)))
goto out;
down_write(&current->mm->mmap_sem);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);


@ -1,6 +1,6 @@
/* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@davemloft.net)
* Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
*/
@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
@ -74,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
i + 1,
p->trapstack[i].tstate, p->trapstack[i].tpc,
p->trapstack[i].tnpc, p->trapstack[i].tt);
print_symbol("TRAPLOG: TPC<%s>\n", p->trapstack[i].tpc);
printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
}
}
@ -1081,7 +1080,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
printk("%s" "ERROR(%d): ",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
print_symbol("TPC<%s>\n", regs->tpc);
printk("TPC<%pS>\n", (void *) regs->tpc);
printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
(recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
(afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
@ -1689,7 +1688,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
smp_processor_id(),
(type & 0x1) ? 'I' : 'D',
regs->tpc);
print_symbol(KERN_EMERG "TPC<%s>\n", regs->tpc);
printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
panic("Irrecoverable Cheetah+ parity error.");
}
@ -1697,7 +1696,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
smp_processor_id(),
(type & 0x1) ? 'I' : 'D',
regs->tpc);
print_symbol(KERN_WARNING "TPC<%s>\n", regs->tpc);
printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
struct sun4v_error_entry {
@ -1904,9 +1903,10 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
print_symbol(KERN_EMERG "SUN4V-ITLB: TPC<%s>\n", regs->tpc);
printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
print_symbol(KERN_EMERG "SUN4V-ITLB: O7<%s>\n", regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
@ -1927,9 +1927,10 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
regs->tpc, tl);
print_symbol(KERN_EMERG "SUN4V-DTLB: TPC<%s>\n", regs->tpc);
printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
print_symbol(KERN_EMERG "SUN4V-DTLB: O7<%s>\n", regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
(void *) regs->u_regs[UREG_I7]);
printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
"pte[%lx] error[%lx]\n",
sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
@ -2111,10 +2112,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = ksp + STACK_BIAS;
thread_base = (unsigned long) tp;
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
printk("Call Trace:\n");
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
@ -2137,12 +2135,8 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = (unsigned long)sf->fp + STACK_BIAS;
}
printk(" [%016lx] ", pc);
print_symbol("%s\n", pc);
printk(" [%016lx] %pS\n", pc, (void *) pc);
} while (++count < 16);
#ifndef CONFIG_KALLSYMS
printk("\n");
#endif
}
void dump_stack(void)
@ -2211,9 +2205,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
while (rw &&
count++ < 30&&
is_kernel_stack(current, rw)) {
printk("Caller[%016lx]", rw->ins[7]);
print_symbol(": %s", rw->ins[7]);
printk("\n");
printk("Caller[%016lx]: %pS\n", rw->ins[7],
(void *) rw->ins[7]);
rw = kernel_stack_up(rw);
}


@ -58,7 +58,12 @@ tl0_irq3: BTRAP(0x43)
tl0_irq4: BTRAP(0x44)
#endif
tl0_irq5: TRAP_IRQ(handler_irq, 5)
tl0_irq6: BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
#ifdef CONFIG_SMP
tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6)
#else
tl0_irq6: BTRAP(0x46)
#endif
tl0_irq7: BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
tl0_irq14: TRAP_IRQ(timer_interrupt, 14)
tl0_irq15: TRAP_IRQ(handler_irq, 15)


@ -2,7 +2,7 @@
* unaligned.c: Unaligned load/store trap handling with special
* cases for the kernel to do them more quickly.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
@ -20,7 +20,6 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/kallsyms.h>
#include <asm/fpumacro.h>
/* #define DEBUG_MNA */
@ -289,8 +288,8 @@ static void log_unaligned(struct pt_regs *regs)
if (count < 5) {
last_time = jiffies;
count++;
printk("Kernel unaligned access at TPC[%lx] ", regs->tpc);
print_symbol("%s\n", regs->tpc);
printk("Kernel unaligned access at TPC[%lx] %pS\n",
regs->tpc, (void *) regs->tpc);
}
}


@ -25,9 +25,9 @@
#define DCACHE_SIZE (PAGE_SIZE * 2)
#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
#if (PAGE_SHIFT == 13)
#define PAGE_SIZE_REM 0x80
#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
#elif (PAGE_SHIFT == 16)
#define PAGE_SIZE_REM 0x100
#else
#error Wrong PAGE_SHIFT specified
@ -198,7 +198,7 @@ cheetah_copy_page_insn:
cmp %o2, PAGE_SIZE_REM
bne,pt %xcc, 1b
add %o0, 0x40, %o0
#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22)
#if (PAGE_SHIFT == 16)
TOUCH(f0, f2, f4, f6, f8, f10, f12, f14)
ldda [%o1] ASI_BLK_P, %f32
stda %f48, [%o0] %asi


@ -1,7 +1,7 @@
/*
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <asm/page.h>
@ -115,7 +114,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
regs->tpc);
printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
print_symbol("RPC: <%s>\n", regs->u_regs[15]);
printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
dump_stack();
unhandled_fault(regs->tpc, current, regs);


@ -96,12 +96,6 @@ void flush_tsb_user(struct mmu_gather *mp)
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_64K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_512K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_4MB
#else
#error Broken base page size setting...
#endif


@ -688,6 +688,11 @@ xcall_call_function:
wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
retry
.globl xcall_call_function_single
xcall_call_function_single:
wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
retry
.globl xcall_receive_signal
xcall_receive_signal:
wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint


@ -59,6 +59,19 @@ config HW_RANDOM_GEODE
If unsure, say Y.
config HW_RANDOM_N2RNG
tristate "Niagara2 Random Number Generator support"
depends on HW_RANDOM && SPARC64
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
Generator hardware found on Niagara2 cpus.
To compile this driver as a module, choose M here: the
module will be called n2-rng.
If unsure, say Y.
config HW_RANDOM_VIA
tristate "VIA HW Random Number Generator support"
depends on HW_RANDOM && X86_32


@ -7,6 +7,8 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o


@ -0,0 +1,79 @@
/* n2-asm.S: Niagara2 RNG hypervisor call assembler.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
#include "n2rng.h"
.text
ENTRY(sun4v_rng_get_diag_ctl)
mov HV_FAST_RNG_GET_DIAG_CTL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_rng_get_diag_ctl)
ENTRY(sun4v_rng_ctl_read_v1)
mov %o1, %o3
mov %o2, %o4
mov HV_FAST_RNG_CTL_READ, %o5
ta HV_FAST_TRAP
stx %o1, [%o3]
retl
stx %o2, [%o4]
ENDPROC(sun4v_rng_ctl_read_v1)
ENTRY(sun4v_rng_ctl_read_v2)
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
mov HV_FAST_RNG_CTL_READ, %o5
ta HV_FAST_TRAP
stx %o1, [%i2]
stx %o2, [%i3]
stx %o3, [%i4]
stx %o4, [%i5]
ret
restore %g0, %o0, %o0
ENDPROC(sun4v_rng_ctl_read_v2)
ENTRY(sun4v_rng_ctl_write_v1)
mov %o3, %o4
mov HV_FAST_RNG_CTL_WRITE, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4]
ENDPROC(sun4v_rng_ctl_write_v1)
ENTRY(sun4v_rng_ctl_write_v2)
mov HV_FAST_RNG_CTL_WRITE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_rng_ctl_write_v2)
ENTRY(sun4v_rng_data_read_diag_v1)
mov %o2, %o4
mov HV_FAST_RNG_DATA_READ_DIAG, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4]
ENDPROC(sun4v_rng_data_read_diag_v1)
ENTRY(sun4v_rng_data_read_diag_v2)
mov %o3, %o4
mov HV_FAST_RNG_DATA_READ_DIAG, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4]
ENDPROC(sun4v_rng_data_read_diag_v2)
ENTRY(sun4v_rng_data_read)
mov %o1, %o4
mov HV_FAST_RNG_DATA_READ, %o5
ta HV_FAST_TRAP
retl
stx %o1, [%o4]
ENDPROC(sun4v_rng_data_read)
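Editor's note: each stub above follows the sun4v fast-trap convention: the hypervisor function number is loaded into %o5, "ta HV_FAST_TRAP" enters the hypervisor, the status comes back in %o0 (which becomes the C return value), and extra results returned in %o1-%o4 are stored through the pointer arguments. A hedged C-side sketch follows; the prototype is inferred from the call sites in n2-drv.c below, since the real declarations live in n2rng.h, which this diff does not show.

/* Sketch only: calling one of the assembler wrappers from C. */
#include <linux/errno.h>
#include <asm/hypervisor.h>
#include <asm/page.h>

/* Assumed prototype, matching the sun4v_rng_data_read(data_ra, &ticks)
 * usage in n2rng_generic_read_data() below. */
extern unsigned long sun4v_rng_data_read(unsigned long data_ra,
					 unsigned long *tick_delay);

static int read_one_rng_word(unsigned long *buf)
{
	unsigned long ticks;
	/* The hypervisor takes a real (physical) address and, on
	 * HV_EOK, stores one 64-bit random word there. */
	unsigned long hv_err = sun4v_rng_data_read(__pa(buf), &ticks);

	return (hv_err == HV_EOK) ? 0 : -EIO;
}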


@ -0,0 +1,771 @@
/* n2-drv.c: Niagara-2 RNG driver.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/preempt.h>
#include <linux/hw_random.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/hypervisor.h>
#include "n2rng.h"
#define DRV_MODULE_NAME "n2rng"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.1"
#define DRV_MODULE_RELDATE "May 15, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 RNG driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* The Niagara2 RNG provides a 64-bit read-only random number
* register, plus a control register. Access to the RNG is
* virtualized through the hypervisor so that both guests and control
* nodes can access the device.
*
* The entropy source consists of three raw entropy sources, each
* constructed from a voltage controlled oscillator whose phase is
* jittered by thermal noise sources.
*
* The oscillators in the three raw entropy sources run at
* different frequencies. Normally, all three generator outputs are
* gathered, xored together, and fed into a CRC circuit, the output of
* which is the 64-bit read-only register.
*
* Some time is needed for all the necessary entropy to build up
* such that a full 64 bits of entropy are available in the register.
* In normal operating mode (RNG_CTL_LFSR is set), the chip implements
* an interlock which blocks register reads until sufficient entropy
* is available.
*
* A control register is provided for adjusting various aspects of RNG
* operation, and to enable diagnostic modes. Each of the three raw
* entropy sources has an enable bit (RNG_CTL_ES{1,2,3}). Also
* provided are fields for controlling the minimum time in cycles
* between read accesses to the register (RNG_CTL_WAIT, this controls
* the interlock described in the previous paragraph).
*
* The standard setting is to have the mode bit (RNG_CTL_LFSR) set,
* all three entropy sources enabled, and the interlock time set
* appropriately.
*
* The CRC polynomial used by the chip is:
*
* P(X) = x^64 + x^61 + x^57 + x^56 + x^52 + x^51 + x^50 + x^48 + x^47 + x^46 +
* x^43 + x^42 + x^41 + x^39 + x^38 + x^37 + x^35 + x^32 + x^28 + x^25 +
* x^22 + x^21 + x^17 + x^15 + x^13 + x^12 + x^11 + x^7 + x^5 + x + 1
*
* The RNG_CTL_VCO value of each noise cell must be programmed
* separately. This is why 4 control register values must be provided
* to the hypervisor. During a write, the hypervisor writes them all,
* one at a time, to the actual RNG_CTL register. The first three
* values are used to setup the desired RNG_CTL_VCO for each entropy
* source, for example:
*
* control 0: (1 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES1
* control 1: (2 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES2
* control 2: (3 << RNG_CTL_VCO_SHIFT) | RNG_CTL_ES3
*
* And then the fourth value sets the final chip state and enables
* the desired entropy sources.
*/
static int n2rng_hv_err_trans(unsigned long hv_err)
{
switch (hv_err) {
case HV_EOK:
return 0;
case HV_EWOULDBLOCK:
return -EAGAIN;
case HV_ENOACCESS:
return -EPERM;
case HV_EIO:
return -EIO;
case HV_EBUSY:
return -EBUSY;
case HV_EBADALIGN:
case HV_ENORADDR:
return -EFAULT;
default:
return -EINVAL;
}
}
static unsigned long n2rng_generic_read_control_v2(unsigned long ra,
unsigned long unit)
{
unsigned long hv_err, state, ticks, watchdog_delta, watchdog_status;
int block = 0, busy = 0;
while (1) {
hv_err = sun4v_rng_ctl_read_v2(ra, unit, &state,
&ticks,
&watchdog_delta,
&watchdog_status);
if (hv_err == HV_EOK)
break;
if (hv_err == HV_EBUSY) {
if (++busy >= N2RNG_BUSY_LIMIT)
break;
udelay(1);
} else if (hv_err == HV_EWOULDBLOCK) {
if (++block >= N2RNG_BLOCK_LIMIT)
break;
__delay(ticks);
} else
break;
}
return hv_err;
}
/* In multi-socket situations, the hypervisor might need to
* queue up the RNG control register write if it's for a unit
* that is on a cpu socket other than the one we are executing on.
*
* We poll here waiting for a successful read of that control
* register to make sure the write has been actually performed.
*/
static unsigned long n2rng_control_settle_v2(struct n2rng *np, int unit)
{
unsigned long ra = __pa(&np->scratch_control[0]);
return n2rng_generic_read_control_v2(ra, unit);
}
static unsigned long n2rng_write_ctl_one(struct n2rng *np, int unit,
unsigned long state,
unsigned long control_ra,
unsigned long watchdog_timeout,
unsigned long *ticks)
{
unsigned long hv_err;
if (np->hvapi_major == 1) {
hv_err = sun4v_rng_ctl_write_v1(control_ra, state,
watchdog_timeout, ticks);
} else {
hv_err = sun4v_rng_ctl_write_v2(control_ra, state,
watchdog_timeout, unit);
if (hv_err == HV_EOK)
hv_err = n2rng_control_settle_v2(np, unit);
*ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
}
return hv_err;
}
static int n2rng_generic_read_data(unsigned long data_ra)
{
unsigned long ticks, hv_err;
int block = 0, hcheck = 0;
while (1) {
hv_err = sun4v_rng_data_read(data_ra, &ticks);
if (hv_err == HV_EOK)
return 0;
if (hv_err == HV_EWOULDBLOCK) {
if (++block >= N2RNG_BLOCK_LIMIT)
return -EWOULDBLOCK;
__delay(ticks);
} else if (hv_err == HV_ENOACCESS) {
return -EPERM;
} else if (hv_err == HV_EIO) {
if (++hcheck >= N2RNG_HCHECK_LIMIT)
return -EIO;
udelay(10000);
} else
return -ENODEV;
}
}
static unsigned long n2rng_read_diag_data_one(struct n2rng *np,
unsigned long unit,
unsigned long data_ra,
unsigned long data_len,
unsigned long *ticks)
{
unsigned long hv_err;
if (np->hvapi_major == 1) {
hv_err = sun4v_rng_data_read_diag_v1(data_ra, data_len, ticks);
} else {
hv_err = sun4v_rng_data_read_diag_v2(data_ra, data_len,
unit, ticks);
if (!*ticks)
*ticks = N2RNG_ACCUM_CYCLES_DEFAULT;
}
return hv_err;
}
static int n2rng_generic_read_diag_data(struct n2rng *np,
unsigned long unit,
unsigned long data_ra,
unsigned long data_len)
{
unsigned long ticks, hv_err;
int block = 0;
while (1) {
hv_err = n2rng_read_diag_data_one(np, unit,
data_ra, data_len,
&ticks);
if (hv_err == HV_EOK)
return 0;
if (hv_err == HV_EWOULDBLOCK) {
if (++block >= N2RNG_BLOCK_LIMIT)
return -EWOULDBLOCK;
__delay(ticks);
} else if (hv_err == HV_ENOACCESS) {
return -EPERM;
} else if (hv_err == HV_EIO) {
return -EIO;
} else
return -ENODEV;
}
}
static int n2rng_generic_write_control(struct n2rng *np,
unsigned long control_ra,
unsigned long unit,
unsigned long state)
{
unsigned long hv_err, ticks;
int block = 0, busy = 0;
while (1) {
hv_err = n2rng_write_ctl_one(np, unit, state, control_ra,
np->wd_timeo, &ticks);
if (hv_err == HV_EOK)
return 0;
if (hv_err == HV_EWOULDBLOCK) {
if (++block >= N2RNG_BLOCK_LIMIT)
return -EWOULDBLOCK;
__delay(ticks);
} else if (hv_err == HV_EBUSY) {
if (++busy >= N2RNG_BUSY_LIMIT)
return -EBUSY;
udelay(1);
} else
return -ENODEV;
}
}
/* Just try to see if we can successfully access the control register
* of the RNG on the domain on which we are currently executing.
*/
static int n2rng_try_read_ctl(struct n2rng *np)
{
unsigned long hv_err;
unsigned long x;
if (np->hvapi_major == 1) {
hv_err = sun4v_rng_get_diag_ctl();
} else {
		/* We purposefully give invalid arguments, HV_ENOACCESS
* is higher priority than the errors we'd get from
* these other cases, and that's the error we are
* truly interested in.
*/
hv_err = sun4v_rng_ctl_read_v2(0UL, ~0UL, &x, &x, &x, &x);
switch (hv_err) {
case HV_EWOULDBLOCK:
case HV_ENOACCESS:
break;
default:
hv_err = HV_EOK;
break;
}
}
return n2rng_hv_err_trans(hv_err);
}
#define CONTROL_DEFAULT_BASE \
((2 << RNG_CTL_ASEL_SHIFT) | \
(N2RNG_ACCUM_CYCLES_DEFAULT << RNG_CTL_WAIT_SHIFT) | \
RNG_CTL_LFSR)
#define CONTROL_DEFAULT_0 \
(CONTROL_DEFAULT_BASE | \
(1 << RNG_CTL_VCO_SHIFT) | \
RNG_CTL_ES1)
#define CONTROL_DEFAULT_1 \
(CONTROL_DEFAULT_BASE | \
(2 << RNG_CTL_VCO_SHIFT) | \
RNG_CTL_ES2)
#define CONTROL_DEFAULT_2 \
(CONTROL_DEFAULT_BASE | \
(3 << RNG_CTL_VCO_SHIFT) | \
RNG_CTL_ES3)
#define CONTROL_DEFAULT_3 \
(CONTROL_DEFAULT_BASE | \
RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3)
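/* These defaults implement the example in the comment at the top of
 * this file: one control word per entropy source with its own
 * RNG_CTL_VCO setting, plus a fourth word that enables all three
 * entropy sources together.
 */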
static void n2rng_control_swstate_init(struct n2rng *np)
{
int i;
np->flags |= N2RNG_FLAG_CONTROL;
np->health_check_sec = N2RNG_HEALTH_CHECK_SEC_DEFAULT;
np->accum_cycles = N2RNG_ACCUM_CYCLES_DEFAULT;
np->wd_timeo = N2RNG_WD_TIMEO_DEFAULT;
for (i = 0; i < np->num_units; i++) {
struct n2rng_unit *up = &np->units[i];
up->control[0] = CONTROL_DEFAULT_0;
up->control[1] = CONTROL_DEFAULT_1;
up->control[2] = CONTROL_DEFAULT_2;
up->control[3] = CONTROL_DEFAULT_3;
}
np->hv_state = HV_RNG_STATE_UNCONFIGURED;
}
static int n2rng_grab_diag_control(struct n2rng *np)
{
int i, busy_count, err = -ENODEV;
busy_count = 0;
for (i = 0; i < 100; i++) {
err = n2rng_try_read_ctl(np);
if (err != -EAGAIN)
break;
if (++busy_count > 100) {
dev_err(&np->op->dev,
"Grab diag control timeout.\n");
return -ENODEV;
}
udelay(1);
}
return err;
}
static int n2rng_init_control(struct n2rng *np)
{
int err = n2rng_grab_diag_control(np);
	/* Not in the control domain; that's OK, we are only a consumer
	 * of the RNG data and do not set up or program the device.
*/
if (err == -EPERM)
return 0;
if (err)
return err;
n2rng_control_swstate_init(np);
return 0;
}
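/* The hwrng core asks for 32 bits per ->data_read() call, while each
 * hypervisor read returns 64 bits.  The upper half of each read is
 * cached in np->buffer and handed out on the following call, tracked
 * by N2RNG_FLAG_BUFFER_VALID.
 */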
static int n2rng_data_read(struct hwrng *rng, u32 *data)
{
struct n2rng *np = (struct n2rng *) rng->priv;
unsigned long ra = __pa(&np->test_data);
int len;
if (!(np->flags & N2RNG_FLAG_READY)) {
len = 0;
} else if (np->flags & N2RNG_FLAG_BUFFER_VALID) {
np->flags &= ~N2RNG_FLAG_BUFFER_VALID;
*data = np->buffer;
len = 4;
} else {
		int err = n2rng_generic_read_data(ra);
		if (!err) {
			/* Cache the upper 32 bits for the next call. */
			np->flags |= N2RNG_FLAG_BUFFER_VALID;
			np->buffer = np->test_data >> 32;
			*data = np->test_data & 0xffffffff;
			len = 4;
		} else {
			dev_err(&np->op->dev, "RNG error, retesting\n");
			np->flags &= ~N2RNG_FLAG_READY;
			if (!(np->flags & N2RNG_FLAG_SHUTDOWN))
				schedule_delayed_work(&np->work, 0);
			len = 0;
		}
}
return len;
}
/* On a guest node, just make sure we can read random data properly.
 * If a control node reboots or reloads its n2rng driver, this won't
* work during that time. So we have to keep probing until the device
* becomes usable.
*/
static int n2rng_guest_check(struct n2rng *np)
{
unsigned long ra = __pa(&np->test_data);
return n2rng_generic_read_data(ra);
}
static int n2rng_entropy_diag_read(struct n2rng *np, unsigned long unit,
u64 *pre_control, u64 pre_state,
u64 *buffer, unsigned long buf_len,
u64 *post_control, u64 post_state)
{
unsigned long post_ctl_ra = __pa(post_control);
unsigned long pre_ctl_ra = __pa(pre_control);
unsigned long buffer_ra = __pa(buffer);
int err;
err = n2rng_generic_write_control(np, pre_ctl_ra, unit, pre_state);
if (err)
return err;
err = n2rng_generic_read_diag_data(np, unit,
buffer_ra, buf_len);
(void) n2rng_generic_write_control(np, post_ctl_ra, unit,
post_state);
return err;
}
static u64 advance_polynomial(u64 poly, u64 val, int count)
{
int i;
for (i = 0; i < count; i++) {
int highbit_set = ((s64)val < 0);
val <<= 1;
if (highbit_set)
val ^= poly;
}
return val;
}
static int n2rng_test_buffer_find(struct n2rng *np, u64 val)
{
int i, count = 0;
/* Purposefully skip over the first word. */
for (i = 1; i < SELFTEST_BUFFER_WORDS; i++) {
if (np->test_buffer[i] == val)
count++;
}
return count;
}
static void n2rng_dump_test_buffer(struct n2rng *np)
{
int i;
for (i = 0; i < SELFTEST_BUFFER_WORDS; i++)
dev_err(&np->op->dev, "Test buffer slot %d [0x%016lx]\n",
i, np->test_buffer[i]);
}
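/* In the self-test configuration all entropy sources are disabled, so
 * the LFSR/CRC register steps through a deterministic sequence.  The
 * check below requires at least SELFTEST_MATCH_GOAL of the sampled
 * words to land on the sequence that advance_polynomial() generates
 * from SELFTEST_VAL.
 */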
static int n2rng_check_selftest_buffer(struct n2rng *np, unsigned long unit)
{
u64 val = SELFTEST_VAL;
int err, matches, limit;
matches = 0;
for (limit = 0; limit < SELFTEST_LOOPS_MAX; limit++) {
matches += n2rng_test_buffer_find(np, val);
if (matches >= SELFTEST_MATCH_GOAL)
break;
val = advance_polynomial(SELFTEST_POLY, val, 1);
}
err = 0;
if (limit >= SELFTEST_LOOPS_MAX) {
err = -ENODEV;
dev_err(&np->op->dev, "Selftest failed on unit %lu\n", unit);
n2rng_dump_test_buffer(np);
} else
dev_info(&np->op->dev, "Selftest passed on unit %lu\n", unit);
return err;
}
static int n2rng_control_selftest(struct n2rng *np, unsigned long unit)
{
int err;
np->test_control[0] = (0x2 << RNG_CTL_ASEL_SHIFT);
np->test_control[1] = (0x2 << RNG_CTL_ASEL_SHIFT);
np->test_control[2] = (0x2 << RNG_CTL_ASEL_SHIFT);
np->test_control[3] = ((0x2 << RNG_CTL_ASEL_SHIFT) |
RNG_CTL_LFSR |
((SELFTEST_TICKS - 2) << RNG_CTL_WAIT_SHIFT));
err = n2rng_entropy_diag_read(np, unit, np->test_control,
HV_RNG_STATE_HEALTHCHECK,
np->test_buffer,
sizeof(np->test_buffer),
&np->units[unit].control[0],
np->hv_state);
if (err)
return err;
return n2rng_check_selftest_buffer(np, unit);
}
static int n2rng_control_check(struct n2rng *np)
{
int i;
for (i = 0; i < np->num_units; i++) {
int err = n2rng_control_selftest(np, i);
if (err)
return err;
}
return 0;
}
/* The sanity checks passed, install the final configuration into the
* chip, it's ready to use.
*/
static int n2rng_control_configure_units(struct n2rng *np)
{
int unit, err;
err = 0;
for (unit = 0; unit < np->num_units; unit++) {
struct n2rng_unit *up = &np->units[unit];
unsigned long ctl_ra = __pa(&up->control[0]);
int esrc;
u64 base;
base = ((np->accum_cycles << RNG_CTL_WAIT_SHIFT) |
(2 << RNG_CTL_ASEL_SHIFT) |
RNG_CTL_LFSR);
		/* XXX This isn't the best.  We should fetch a bunch
		 * XXX of words using each entropy source combined
		 * XXX with each VCO setting, and see which combinations
		 * XXX give the best random data.
		 */
for (esrc = 0; esrc < 3; esrc++)
up->control[esrc] = base |
(esrc << RNG_CTL_VCO_SHIFT) |
(RNG_CTL_ES1 << esrc);
up->control[3] = base |
(RNG_CTL_ES1 | RNG_CTL_ES2 | RNG_CTL_ES3);
err = n2rng_generic_write_control(np, ctl_ra, unit,
HV_RNG_STATE_CONFIGURED);
if (err)
break;
}
return err;
}
static void n2rng_work(struct work_struct *work)
{
struct n2rng *np = container_of(work, struct n2rng, work.work);
int err = 0;
if (!(np->flags & N2RNG_FLAG_CONTROL)) {
err = n2rng_guest_check(np);
} else {
preempt_disable();
err = n2rng_control_check(np);
preempt_enable();
if (!err)
err = n2rng_control_configure_units(np);
}
if (!err) {
np->flags |= N2RNG_FLAG_READY;
dev_info(&np->op->dev, "RNG ready\n");
}
if (err && !(np->flags & N2RNG_FLAG_SHUTDOWN))
schedule_delayed_work(&np->work, HZ * 2);
}
static void __devinit n2rng_driver_version(void)
{
static int n2rng_version_printed;
if (n2rng_version_printed++ == 0)
pr_info("%s", version);
}
static int __devinit n2rng_probe(struct of_device *op,
const struct of_device_id *match)
{
int victoria_falls = (match->data != NULL);
int err = -ENOMEM;
struct n2rng *np;
n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
goto out;
np->op = op;
INIT_DELAYED_WORK(&np->work, n2rng_work);
if (victoria_falls)
np->flags |= N2RNG_FLAG_VF;
err = -ENODEV;
np->hvapi_major = 2;
if (sun4v_hvapi_register(HV_GRP_RNG,
np->hvapi_major,
&np->hvapi_minor)) {
np->hvapi_major = 1;
if (sun4v_hvapi_register(HV_GRP_RNG,
np->hvapi_major,
&np->hvapi_minor)) {
dev_err(&op->dev, "Cannot register suitable "
"HVAPI version.\n");
goto out_free;
}
}
if (np->flags & N2RNG_FLAG_VF) {
if (np->hvapi_major < 2) {
dev_err(&op->dev, "VF RNG requires HVAPI major "
"version 2 or later, got %lu\n",
np->hvapi_major);
goto out_hvapi_unregister;
}
np->num_units = of_getintprop_default(op->node,
"rng-#units", 0);
if (!np->num_units) {
dev_err(&op->dev, "VF RNG lacks rng-#units property\n");
goto out_hvapi_unregister;
}
} else
np->num_units = 1;
dev_info(&op->dev, "Registered RNG HVAPI major %lu minor %lu\n",
np->hvapi_major, np->hvapi_minor);
np->units = kzalloc(sizeof(struct n2rng_unit) * np->num_units,
GFP_KERNEL);
err = -ENOMEM;
if (!np->units)
goto out_hvapi_unregister;
err = n2rng_init_control(np);
if (err)
goto out_free_units;
dev_info(&op->dev, "Found %s RNG, units: %d\n",
((np->flags & N2RNG_FLAG_VF) ?
"Victoria Falls" : "Niagara2"),
np->num_units);
np->hwrng.name = "n2rng";
np->hwrng.data_read = n2rng_data_read;
np->hwrng.priv = (unsigned long) np;
err = hwrng_register(&np->hwrng);
if (err)
goto out_free_units;
dev_set_drvdata(&op->dev, np);
schedule_delayed_work(&np->work, 0);
return 0;
out_free_units:
kfree(np->units);
np->units = NULL;
out_hvapi_unregister:
sun4v_hvapi_unregister(HV_GRP_RNG);
out_free:
kfree(np);
out:
return err;
}
static int __devexit n2rng_remove(struct of_device *op)
{
struct n2rng *np = dev_get_drvdata(&op->dev);
np->flags |= N2RNG_FLAG_SHUTDOWN;
cancel_delayed_work_sync(&np->work);
hwrng_unregister(&np->hwrng);
sun4v_hvapi_unregister(HV_GRP_RNG);
kfree(np->units);
np->units = NULL;
kfree(np);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
static struct of_device_id n2rng_match[] = {
{
.name = "random-number-generator",
.compatible = "SUNW,n2-rng",
},
{
.name = "random-number-generator",
.compatible = "SUNW,vf-rng",
.data = (void *) 1,
},
{},
};
MODULE_DEVICE_TABLE(of, n2rng_match);
static struct of_platform_driver n2rng_driver = {
.name = "n2rng",
.match_table = n2rng_match,
.probe = n2rng_probe,
.remove = __devexit_p(n2rng_remove),
};
static int __init n2rng_init(void)
{
return of_register_driver(&n2rng_driver, &of_bus_type);
}
static void __exit n2rng_exit(void)
{
of_unregister_driver(&n2rng_driver);
}
module_init(n2rng_init);
module_exit(n2rng_exit);


@ -0,0 +1,118 @@
/* n2rng.h: Niagara2 RNG defines.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#ifndef _N2RNG_H
#define _N2RNG_H
#define RNG_CTL_WAIT 0x0000000001fffe00ULL /* Minimum wait time */
#define RNG_CTL_WAIT_SHIFT 9
#define RNG_CTL_BYPASS 0x0000000000000100ULL /* VCO voltage source */
#define RNG_CTL_VCO 0x00000000000000c0ULL /* VCO rate control */
#define RNG_CTL_VCO_SHIFT 6
#define RNG_CTL_ASEL 0x0000000000000030ULL /* Analog MUX select */
#define RNG_CTL_ASEL_SHIFT 4
#define RNG_CTL_LFSR 0x0000000000000008ULL /* Use LFSR or plain shift */
#define RNG_CTL_ES3 0x0000000000000004ULL /* Enable entropy source 3 */
#define RNG_CTL_ES2 0x0000000000000002ULL /* Enable entropy source 2 */
#define RNG_CTL_ES1 0x0000000000000001ULL /* Enable entropy source 1 */
#define HV_FAST_RNG_GET_DIAG_CTL 0x130
#define HV_FAST_RNG_CTL_READ 0x131
#define HV_FAST_RNG_CTL_WRITE 0x132
#define HV_FAST_RNG_DATA_READ_DIAG 0x133
#define HV_FAST_RNG_DATA_READ 0x134
#define HV_RNG_STATE_UNCONFIGURED 0
#define HV_RNG_STATE_CONFIGURED 1
#define HV_RNG_STATE_HEALTHCHECK 2
#define HV_RNG_STATE_ERROR 3
#define HV_RNG_NUM_CONTROL 4
#ifndef __ASSEMBLY__
extern unsigned long sun4v_rng_get_diag_ctl(void);
extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
unsigned long *state,
unsigned long *tick_delta);
extern unsigned long sun4v_rng_ctl_read_v2(unsigned long ctl_regs_ra,
unsigned long unit,
unsigned long *state,
unsigned long *tick_delta,
unsigned long *watchdog,
unsigned long *write_status);
extern unsigned long sun4v_rng_ctl_write_v1(unsigned long ctl_regs_ra,
unsigned long state,
unsigned long write_timeout,
unsigned long *tick_delta);
extern unsigned long sun4v_rng_ctl_write_v2(unsigned long ctl_regs_ra,
unsigned long state,
unsigned long write_timeout,
unsigned long unit);
extern unsigned long sun4v_rng_data_read_diag_v1(unsigned long data_ra,
unsigned long len,
unsigned long *tick_delta);
extern unsigned long sun4v_rng_data_read_diag_v2(unsigned long data_ra,
unsigned long len,
unsigned long unit,
unsigned long *tick_delta);
extern unsigned long sun4v_rng_data_read(unsigned long data_ra,
unsigned long *tick_delta);
struct n2rng_unit {
u64 control[HV_RNG_NUM_CONTROL];
};
struct n2rng {
struct of_device *op;
unsigned long flags;
#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */
#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
#define N2RNG_FLAG_BUFFER_VALID 0x00000020 /* u32 buffer holds valid data */
int num_units;
struct n2rng_unit *units;
struct hwrng hwrng;
u32 buffer;
/* Registered hypervisor group API major and minor version. */
unsigned long hvapi_major;
unsigned long hvapi_minor;
struct delayed_work work;
unsigned long hv_state; /* HV_RNG_STATE_foo */
unsigned long health_check_sec;
unsigned long accum_cycles;
unsigned long wd_timeo;
#define N2RNG_HEALTH_CHECK_SEC_DEFAULT 0
#define N2RNG_ACCUM_CYCLES_DEFAULT 2048
#define N2RNG_WD_TIMEO_DEFAULT 0
u64 scratch_control[HV_RNG_NUM_CONTROL];
#define SELFTEST_TICKS 38859
#define SELFTEST_VAL ((u64)0xB8820C7BD387E32C)
#define SELFTEST_POLY ((u64)0x231DCEE91262B8A3)
#define SELFTEST_MATCH_GOAL 6
#define SELFTEST_LOOPS_MAX 40000
#define SELFTEST_BUFFER_WORDS 8
u64 test_data;
u64 test_control[HV_RNG_NUM_CONTROL];
u64 test_buffer[SELFTEST_BUFFER_WORDS];
};
#define N2RNG_BLOCK_LIMIT 60000
#define N2RNG_BUSY_LIMIT 100
#define N2RNG_HCHECK_LIMIT 100
#endif /* !(__ASSEMBLY__) */
#endif /* _N2RNG_H */


@ -195,8 +195,8 @@ struct uctrl_driver {
static struct uctrl_driver drv;
void uctrl_get_event_status(void);
void uctrl_get_external_status(void);
static void uctrl_get_event_status(void);
static void uctrl_get_external_status(void);
static int
uctrl_ioctl(struct inode *inode, struct file *file,
@ -266,12 +266,6 @@ static struct miscdevice uctrl_dev = {
driver->regs->uctrl_stat = UCTRL_STAT_RXNE_STA; \
}
void uctrl_set_video(int status)
{
struct uctrl_driver *driver = &drv;
}
static void uctrl_do_txn(struct uctrl_txn *txn)
{
struct uctrl_driver *driver = &drv;
@ -311,7 +305,7 @@ static void uctrl_do_txn(struct uctrl_txn *txn)
}
}
void uctrl_get_event_status(void)
static void uctrl_get_event_status(void)
{
struct uctrl_driver *driver = &drv;
struct uctrl_txn txn;
@ -331,7 +325,7 @@ void uctrl_get_event_status(void)
dprintk(("ev is %x\n", driver->status.event_status));
}
void uctrl_get_external_status(void)
static void uctrl_get_external_status(void)
{
struct uctrl_driver *driver = &drv;
struct uctrl_txn txn;
@ -363,7 +357,7 @@ void uctrl_get_external_status(void)
static int __init ts102_uctrl_init(void)
{
struct uctrl_driver *driver = &drv;
int len, i;
int len;
struct linux_prom_irqs tmp_irq[2];
unsigned int vaddr[2] = { 0, 0 };
int tmpnode, uctrlnode = prom_getchild(prom_root_node);


@ -133,8 +133,6 @@ struct vfc_dev {
unsigned char saa9051_state_array[VFC_SAA9051_NR];
};
extern struct vfc_dev **vfc_dev_lst;
void captstat_reset(struct vfc_dev *);
void memptr_reset(struct vfc_dev *);
@ -145,8 +143,6 @@ int vfc_i2c_sendbuf(struct vfc_dev *, unsigned char, char *, int) ;
int vfc_i2c_recvbuf(struct vfc_dev *, unsigned char, char *, int) ;
int vfc_i2c_reset_bus(struct vfc_dev *);
int vfc_init_i2c_bus(struct vfc_dev *);
void vfc_lock_device(struct vfc_dev *);
void vfc_unlock_device(struct vfc_dev *);
#define VFC_CONTROL_DIAGMODE 0x10000000
#define VFC_CONTROL_MEMPTR 0x20000000


@ -45,7 +45,7 @@
#include <asm/vfc_ioctls.h>
static const struct file_operations vfc_fops;
struct vfc_dev **vfc_dev_lst;
static struct vfc_dev **vfc_dev_lst;
static char vfcstr[]="vfc";
static unsigned char saa9051_init_array[VFC_SAA9051_NR] = {
0x00, 0x64, 0x72, 0x52,
@ -54,18 +54,18 @@ static unsigned char saa9051_init_array[VFC_SAA9051_NR] = {
0x3e
};
void vfc_lock_device(struct vfc_dev *dev)
static void vfc_lock_device(struct vfc_dev *dev)
{
mutex_lock(&dev->device_lock_mtx);
}
void vfc_unlock_device(struct vfc_dev *dev)
static void vfc_unlock_device(struct vfc_dev *dev)
{
mutex_unlock(&dev->device_lock_mtx);
}
void vfc_captstat_reset(struct vfc_dev *dev)
static void vfc_captstat_reset(struct vfc_dev *dev)
{
dev->control_reg |= VFC_CONTROL_CAPTRESET;
sbus_writel(dev->control_reg, &dev->regs->control);
@ -75,7 +75,7 @@ void vfc_captstat_reset(struct vfc_dev *dev)
sbus_writel(dev->control_reg, &dev->regs->control);
}
void vfc_memptr_reset(struct vfc_dev *dev)
static void vfc_memptr_reset(struct vfc_dev *dev)
{
dev->control_reg |= VFC_CONTROL_MEMPTR;
sbus_writel(dev->control_reg, &dev->regs->control);
@ -85,7 +85,7 @@ void vfc_memptr_reset(struct vfc_dev *dev)
sbus_writel(dev->control_reg, &dev->regs->control);
}
int vfc_csr_init(struct vfc_dev *dev)
static int vfc_csr_init(struct vfc_dev *dev)
{
dev->control_reg = 0x80000000;
sbus_writel(dev->control_reg, &dev->regs->control);
@ -107,7 +107,7 @@ int vfc_csr_init(struct vfc_dev *dev)
return 0;
}
int vfc_saa9051_init(struct vfc_dev *dev)
static int vfc_saa9051_init(struct vfc_dev *dev)
{
int i;
@ -119,7 +119,7 @@ int vfc_saa9051_init(struct vfc_dev *dev)
return 0;
}
int init_vfc_hw(struct vfc_dev *dev)
static int init_vfc_hw(struct vfc_dev *dev)
{
vfc_lock_device(dev);
vfc_csr_init(dev);
@ -132,7 +132,7 @@ int init_vfc_hw(struct vfc_dev *dev)
return 0;
}
int init_vfc_devstruct(struct vfc_dev *dev, int instance)
static int init_vfc_devstruct(struct vfc_dev *dev, int instance)
{
dev->instance=instance;
mutex_init(&dev->device_lock_mtx);
@ -141,7 +141,8 @@ int init_vfc_devstruct(struct vfc_dev *dev, int instance)
return 0;
}
int init_vfc_device(struct sbus_dev *sdev,struct vfc_dev *dev, int instance)
static int init_vfc_device(struct sbus_dev *sdev,struct vfc_dev *dev,
int instance)
{
if(dev == NULL) {
printk(KERN_ERR "VFC: Bogus pointer passed\n");
@ -168,7 +169,7 @@ int init_vfc_device(struct sbus_dev *sdev,struct vfc_dev *dev, int instance)
}
struct vfc_dev *vfc_get_dev_ptr(int instance)
static struct vfc_dev *vfc_get_dev_ptr(int instance)
{
return vfc_dev_lst[instance];
}
@ -292,7 +293,7 @@ static int vfc_debug(struct vfc_dev *dev, int cmd, void __user *argp)
return 0;
}
int vfc_capture_start(struct vfc_dev *dev)
static int vfc_capture_start(struct vfc_dev *dev)
{
vfc_captstat_reset(dev);
dev->control_reg = sbus_readl(&dev->regs->control);
@ -314,7 +315,7 @@ int vfc_capture_start(struct vfc_dev *dev)
return 0;
}
int vfc_capture_poll(struct vfc_dev *dev)
static int vfc_capture_poll(struct vfc_dev *dev)
{
int timeout = 1000;
@ -390,8 +391,8 @@ static int vfc_set_control_ioctl(struct inode *inode, struct file *file,
}
int vfc_port_change_ioctl(struct inode *inode, struct file *file,
struct vfc_dev *dev, unsigned long arg)
static int vfc_port_change_ioctl(struct inode *inode, struct file *file,
struct vfc_dev *dev, unsigned long arg)
{
int ret = 0;
int cmd;
@ -460,8 +461,8 @@ int vfc_port_change_ioctl(struct inode *inode, struct file *file,
return ret;
}
int vfc_set_video_ioctl(struct inode *inode, struct file *file,
struct vfc_dev *dev, unsigned long arg)
static int vfc_set_video_ioctl(struct inode *inode, struct file *file,
struct vfc_dev *dev, unsigned long arg)
{
int ret = 0;
int cmd;
@ -511,8 +512,8 @@ int vfc_set_video_ioctl(struct inode *inode, struct file *file,
return ret;
}
int vfc_get_video_ioctl(struct inode *inode, struct file *file,
struct vfc_dev *dev, unsigned long arg)
static int vfc_get_video_ioctl(struct inode *inode, struct file *file,
struct vfc_dev *dev, unsigned long arg)
{
int ret = 0;
unsigned int status = NO_LOCK;


@ -114,7 +114,7 @@ int vfc_i2c_reset_bus(struct vfc_dev *dev)
return 0;
}
int vfc_i2c_wait_for_bus(struct vfc_dev *dev)
static int vfc_i2c_wait_for_bus(struct vfc_dev *dev)
{
int timeout = 1000;
@ -126,7 +126,7 @@ int vfc_i2c_wait_for_bus(struct vfc_dev *dev)
return 0;
}
int vfc_i2c_wait_for_pin(struct vfc_dev *dev, int ack)
static int vfc_i2c_wait_for_pin(struct vfc_dev *dev, int ack)
{
int timeout = 1000;
int s1;
@ -144,7 +144,8 @@ int vfc_i2c_wait_for_pin(struct vfc_dev *dev, int ack)
}
#define SHIFT(a) ((a) << 24)
int vfc_i2c_xmit_addr(struct vfc_dev *dev, unsigned char addr, char mode)
static int vfc_i2c_xmit_addr(struct vfc_dev *dev, unsigned char addr,
char mode)
{
int ret, raddr;
#if 1
@ -195,7 +196,7 @@ int vfc_i2c_xmit_addr(struct vfc_dev *dev, unsigned char addr, char mode)
return 0;
}
int vfc_i2c_xmit_byte(struct vfc_dev *dev,unsigned char *byte)
static int vfc_i2c_xmit_byte(struct vfc_dev *dev,unsigned char *byte)
{
int ret;
u32 val = SHIFT((unsigned int)*byte);
@ -218,7 +219,8 @@ int vfc_i2c_xmit_byte(struct vfc_dev *dev,unsigned char *byte)
return ret;
}
int vfc_i2c_recv_byte(struct vfc_dev *dev, unsigned char *byte, int last)
static int vfc_i2c_recv_byte(struct vfc_dev *dev, unsigned char *byte,
int last)
{
int ret;


@ -16,7 +16,7 @@
struct sbus_dma *dma_chain;
void __init init_one_dvma(struct sbus_dma *dma, int num_dma)
static void __init init_one_dvma(struct sbus_dma *dma, int num_dma)
{
printk("dma%d: ", num_dma);


@ -1,14 +1 @@
include include/asm-generic/Kbuild.asm
header-y += apc.h
header-y += asi.h
header-y += bpp.h
header-y += jsflash.h
header-y += openpromio.h
header-y += reg.h
header-y += traps.h
header-y += vfc_ioctls.h
unifdef-y += fbio.h
unifdef-y += perfctr.h
unifdef-y += psr.h
# dummy file to avoid breaking make headers_install

include/asm-sparc/agp.h

@ -0,0 +1,20 @@
#ifndef AGP_H
#define AGP_H 1
/* dummy for now */
#define map_page_into_agp(page)
#define unmap_page_from_agp(page)
#define flush_agp_cache() mb()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif

include/asm-sparc/apb.h

@ -0,0 +1,36 @@
/*
* apb.h: Advanced PCI Bridge Configuration Registers and Bits
*
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*/
#ifndef _SPARC64_APB_H
#define _SPARC64_APB_H
#define APB_TICK_REGISTER 0xb0
#define APB_INT_ACK 0xb8
#define APB_PRIMARY_MASTER_RETRY_LIMIT 0xc0
#define APB_DMA_ASFR 0xc8
#define APB_DMA_AFAR 0xd0
#define APB_PIO_TARGET_RETRY_LIMIT 0xd8
#define APB_PIO_TARGET_LATENCY_TIMER 0xd9
#define APB_DMA_TARGET_RETRY_LIMIT 0xda
#define APB_DMA_TARGET_LATENCY_TIMER 0xdb
#define APB_SECONDARY_MASTER_RETRY_LIMIT 0xdc
#define APB_SECONDARY_CONTROL 0xdd
#define APB_IO_ADDRESS_MAP 0xde
#define APB_MEM_ADDRESS_MAP 0xdf
#define APB_PCI_CONTROL_LOW 0xe0
# define APB_PCI_CTL_LOW_ARB_PARK (1 << 21)
# define APB_PCI_CTL_LOW_ERRINT_EN (1 << 8)
#define APB_PCI_CONTROL_HIGH 0xe4
# define APB_PCI_CTL_HIGH_SERR (1 << 2)
# define APB_PCI_CTL_HIGH_ARBITER_EN (1 << 0)
#define APB_PIO_ASFR 0xe8
#define APB_PIO_AFAR 0xf0
#define APB_DIAG_REGISTER 0xf8
#endif /* !(_SPARC64_APB_H) */


@ -3,7 +3,7 @@
/* asi.h: Address Space Identifier values for the sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
*
* Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au)
* Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su>
@ -108,4 +108,155 @@
#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */
/* V9 Architecture mandatory ASIs. */
#define ASI_N 0x04 /* Nucleus */
#define ASI_NL 0x0c /* Nucleus, little endian */
#define ASI_AIUP 0x10 /* Primary, user */
#define ASI_AIUS 0x11 /* Secondary, user */
#define ASI_AIUPL 0x18 /* Primary, user, little endian */
#define ASI_AIUSL 0x19 /* Secondary, user, little endian */
#define ASI_P 0x80 /* Primary, implicit */
#define ASI_S 0x81 /* Secondary, implicit */
#define ASI_PNF 0x82 /* Primary, no fault */
#define ASI_SNF 0x83 /* Secondary, no fault */
#define ASI_PL 0x88 /* Primary, implicit, l-endian */
#define ASI_SL 0x89 /* Secondary, implicit, l-endian */
#define ASI_PNFL 0x8a /* Primary, no fault, l-endian */
#define ASI_SNFL 0x8b /* Secondary, no fault, l-endian */
/* SpitFire and later extended ASIs. The "(III)" marker designates
* UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
* Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
* ASIs, "(4V)" designates SUN4V specific ASIs.
*/
#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */
#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */
#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/
#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */
#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/
#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */
#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */
#define ASI_MMU 0x21 /* (4V) MMU Context Registers */
#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
* secondary, user
*/
#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */
#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */
#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
#define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */
#define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */
#define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */
#define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */
#define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */
#define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */
#define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */
#define ASI_SRAM_FAST_INIT 0x40 /* (III+) Fast SRAM init */
#define ASI_CORE_AVAILABLE 0x41 /* (CMT) LP Available */
#define ASI_CORE_ENABLE_STAT 0x41 /* (CMT) LP Enable Status */
#define ASI_CORE_ENABLE 0x41 /* (CMT) LP Enable RW */
#define ASI_XIR_STEERING 0x41 /* (CMT) XIR Steering RW */
#define ASI_CORE_RUNNING_RW 0x41 /* (CMT) LP Running RW */
#define ASI_CORE_RUNNING_W1S 0x41 /* (CMT) LP Running Write-One Set */
#define ASI_CORE_RUNNING_W1C 0x41 /* (CMT) LP Running Write-One Clr */
#define ASI_CORE_RUNNING_STAT 0x41 /* (CMT) LP Running Status */
#define ASI_CMT_ERROR_STEERING 0x41 /* (CMT) Error Steering RW */
#define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */
#define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */
#define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */
#define ASI_LSU_CONTROL 0x45 /* Load-store control unit */
#define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control reg */
#define ASI_DCACHE_DATA 0x46 /* DCache data-ram diag access */
#define ASI_DCACHE_TAG 0x47 /* Dcache tag/valid ram diag access*/
#define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ vector dispatch status */
#define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */
#define ASI_UPA_CONFIG 0x4a /* UPA config space */
#define ASI_JBUS_CONFIG 0x4a /* (IIIi) JBUS Config Register */
#define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */
#define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */
#define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */
#define ASI_AFSR 0x4c /* Async fault status register */
#define ASI_AFAR 0x4d /* Async fault address register */
#define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */
#define ASI_IMMU 0x50 /* Insn-MMU main register space */
#define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */
#define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */
#define ASI_ITLB_DATA_IN 0x54 /* Insn-MMU TLB data in reg */
#define ASI_ITLB_DATA_ACCESS 0x55 /* Insn-MMU TLB data access reg */
#define ASI_ITLB_TAG_READ 0x56 /* Insn-MMU TLB tag read reg */
#define ASI_IMMU_DEMAP 0x57 /* Insn-MMU TLB demap */
#define ASI_DMMU 0x58 /* Data-MMU main register space */
#define ASI_DMMU_TSB_8KB_PTR 0x59 /* Data-MMU 8KB TSB pointer reg */
#define ASI_DMMU_TSB_64KB_PTR 0x5a /* Data-MMU 64KB TSB pointer reg */
#define ASI_DMMU_TSB_DIRECT_PTR 0x5b /* Data-MMU TSB direct pointer reg */
#define ASI_DTLB_DATA_IN 0x5c /* Data-MMU TLB data in reg */
#define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access reg */
#define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read reg */
#define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */
#define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint */
#define ASI_INTR_ID 0x63 /* (CMT) Interrupt ID register */
#define ASI_CORE_ID 0x63 /* (CMT) LP ID register */
#define ASI_CESR_ID 0x63 /* (CMT) CESR ID register */
#define ASI_IC_INSTR 0x66 /* Insn cache instruction ram diag */
#define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */
#define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */
#define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */
#define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */
#define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/
#define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */
#define ASI_BLK_AIUS 0x71 /* Secondary, user, block ld/st */
#define ASI_MCU_CTRL_REG 0x72 /* (III) Memory controller regs */
#define ASI_EC_DATA 0x74 /* (III) E-cache data staging reg */
#define ASI_EC_CTRL 0x75 /* (III) E-cache control reg */
#define ASI_EC_W 0x76 /* E-cache diag write access */
#define ASI_UDB_ERROR_W 0x77 /* External UDB error regs W */
#define ASI_UDB_CONTROL_W 0x77 /* External UDB control regs W */
#define ASI_INTR_W 0x77 /* IRQ vector dispatch write */
#define ASI_INTR_DATAN_W 0x77 /* (III) Out irq vector data reg N */
#define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */
#define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st*/
#define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st*/
#define ASI_EC_R 0x7e /* E-cache diag read access */
#define ASI_UDBH_ERROR_R 0x7f /* External UDB error regs rd hi */
#define ASI_UDBL_ERROR_R 0x7f /* External UDB error regs rd low */
#define ASI_UDBH_CONTROL_R 0x7f /* External UDB control regs rd hi */
#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/
#define ASI_INTR_R 0x7f /* IRQ vector dispatch read */
#define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */
#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */
#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */
#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */
#define ASI_PST16_S 0xc3 /* Secondary, 4 16-bit, partial */
#define ASI_PST32_P 0xc4 /* Primary, 2 32-bit, partial */
#define ASI_PST32_S 0xc5 /* Secondary, 2 32-bit, partial */
#define ASI_PST8_PL 0xc8 /* Primary, 8 8-bit, partial, L */
#define ASI_PST8_SL 0xc9 /* Secondary, 8 8-bit, partial, L */
#define ASI_PST16_PL 0xca /* Primary, 4 16-bit, partial, L */
#define ASI_PST16_SL 0xcb /* Secondary, 4 16-bit, partial, L */
#define ASI_PST32_PL 0xcc /* Primary, 2 32-bit, partial, L */
#define ASI_PST32_SL 0xcd /* Secondary, 2 32-bit, partial, L */
#define ASI_FL8_P 0xd0 /* Primary, 1 8-bit, fpu ld/st */
#define ASI_FL8_S 0xd1 /* Secondary, 1 8-bit, fpu ld/st */
#define ASI_FL16_P 0xd2 /* Primary, 1 16-bit, fpu ld/st */
#define ASI_FL16_S 0xd3 /* Secondary, 1 16-bit, fpu ld/st */
#define ASI_FL8_PL 0xd8 /* Primary, 1 8-bit, fpu ld/st, L */
#define ASI_FL8_SL 0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/
#define ASI_FL16_PL 0xda /* Primary, 1 16-bit, fpu ld/st, L */
#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
* primary, implicit
*/
#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */
#define ASI_BLK_SL 0xf9 /* Secondary, blk ld/st, little */
#endif /* _SPARC_ASI_H */


@ -1,165 +1,8 @@
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP
#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)
#ifndef ___ASM_SPARC_ATOMIC_H
#define ___ASM_SPARC_ATOMIC_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/atomic_64.h>
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
* ----------------------------------------
* | signed 24-bit counter value | lock | atomic_t
* ----------------------------------------
* 31 8 7 0
*/
#define ATOMIC24_INIT(i) { ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
while(ret & 0xff)
ret = v->counter;
return ret >> 8;
}
#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
#include <asm-sparc/atomic_32.h>
#endif
#endif
static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))
#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */


@ -0,0 +1,165 @@
/* atomic.h: These still suck, but the I-cache hit rate is higher.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
*
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
*/
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
#define atomic_read(v) ((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* This is the old 24-bit implementation. It's still used internally
* by some sparc-specific code, notably the semaphore implementation.
*/
typedef struct { volatile int counter; } atomic24_t;
#ifndef CONFIG_SMP
#define ATOMIC24_INIT(i) { (i) }
#define atomic24_read(v) ((v)->counter)
#define atomic24_set(v, i) (((v)->counter) = i)
#else
/* We do the bulk of the actual work out of line in two common
* routines in assembler, see arch/sparc/lib/atomic.S for the
* "fun" details.
*
* For SMP the trick is you embed the spin lock byte within
* the word, use the low byte so signedness is easily retained
* via a quick arithmetic shift. It looks like this:
*
* ----------------------------------------
* | signed 24-bit counter value | lock | atomic_t
* ----------------------------------------
* 31 8 7 0
*/
#define ATOMIC24_INIT(i) { ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
int ret = v->counter;
while(ret & 0xff)
ret = v->counter;
return ret >> 8;
}
#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
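/* Illustration: ATOMIC24_INIT(-5) stores (-5 << 8) in the word; once
 * the lock byte in bits 7:0 reads back as zero, the arithmetic right
 * shift in atomic24_read() recovers the signed value -5.
 */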
#endif
static inline int __atomic24_add(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_add\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
register volatile int *ptr asm("g1");
register int increment asm("g2");
register int tmp1 asm("g3");
register int tmp2 asm("g4");
register int tmp3 asm("g7");
ptr = &v->counter;
increment = i;
__asm__ __volatile__(
"mov %%o7, %%g4\n\t"
"call ___atomic24_sub\n\t"
" add %%o7, 8, %%o7\n"
: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
: "0" (increment), "r" (ptr)
: "memory", "cc");
return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))
#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */


@ -0,0 +1,128 @@
/* atomic.h: Thankfully the V9 is at least reasonable for this
* stuff.
*
* Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
*/
#ifndef __ARCH_SPARC64_ATOMIC__
#define __ARCH_SPARC64_ATOMIC__
#include <linux/types.h>
#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic64_read(v) ((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i)
extern void atomic_add(int, atomic_t *);
extern void atomic64_add(int, atomic64_t *);
extern void atomic_sub(int, atomic_t *);
extern void atomic64_sub(int, atomic64_t *);
extern int atomic_add_ret(int, atomic_t *);
extern int atomic64_add_ret(int, atomic64_t *);
extern int atomic_sub_ret(int, atomic_t *);
extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
#define atomic_inc_return(v) atomic_add_ret(1, v)
#define atomic64_inc_return(v) atomic64_add_ret(1, v)
#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
#define atomic_add_return(i, v) atomic_add_ret(i, v)
#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v)
#define atomic64_inc(v) atomic64_add(1, v)
#define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec(v) atomic64_sub(1, v)
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
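/* atomic_add_unless() returns non-zero only when it actually performed
 * the add, which is what atomic_inc_not_zero() below relies on:
 * increment @v unless it is already zero.
 */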
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec() membar_storeload_loadload();
#define smp_mb__after_atomic_dec() membar_storeload_storestore();
#define smp_mb__before_atomic_inc() membar_storeload_loadload();
#define smp_mb__after_atomic_inc() membar_storeload_storestore();
#else
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */


@ -1,89 +1,8 @@
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/system.h>
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's)
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern __volatile__ unsigned char * auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */
#ifndef ___ASM_SPARC_AUXIO_H
#define ___ASM_SPARC_AUXIO_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/auxio_64.h>
#else
#include <asm-sparc/auxio_32.h>
#endif
#endif


@ -0,0 +1,89 @@
/*
* auxio.h: Definitions and code for the Auxiliary I/O register.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC_AUXIO_H
#define _SPARC_AUXIO_H
#include <asm/system.h>
#include <asm/vaddrs.h>
/* This register is an unsigned char in IO space. It does two things.
* First, it is used to control the front panel LED light on machines
* that have it (good for testing entry points to trap handlers and irq's)
* Secondly, it controls various floppy drive parameters.
*/
#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
/* Set the following to zero to eject the floppy. */
#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
#ifndef __ASSEMBLY__
/*
* NOTE: these routines are implementation dependent--
* understand the hardware you are querying!
*/
extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
/*
* The following routines are provided for driver-compatibility
* with sparc64 (primarily sunlance.c)
*/
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
#define auxio_set_lte(on) \
do { \
if(on) { \
set_auxio(AUXIO_LINK_TEST, 0); \
} else { \
set_auxio(0, AUXIO_LINK_TEST); \
} \
} while (0)
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
#define auxio_set_led(on) \
do { \
if(on) { \
set_auxio(AUXIO_LED, 0); \
} else { \
set_auxio(0, AUXIO_LED); \
} \
} while (0)
#endif /* !(__ASSEMBLY__) */
/* AUXIO2 (Power Off Control) */
extern __volatile__ unsigned char * auxio_power_register;
#define AUXIO_POWER_DETECT_FAILURE 32
#define AUXIO_POWER_CLEAR_FAILURE 2
#define AUXIO_POWER_OFF 1
#endif /* !(_SPARC_AUXIO_H) */


@ -0,0 +1,100 @@
/*
* auxio.h: Definitions and code for the Auxiliary I/O registers.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#ifndef _SPARC64_AUXIO_H
#define _SPARC64_AUXIO_H
/* AUXIO implementations:
* sbus-based NCR89C105 "Slavio"
* LED/Floppy (AUX1) register
* Power (AUX2) register
*
* ebus-based auxio on PCIO
* LED Auxio Register
* Power Auxio Register
*
* Register definitions from NCR _NCR89C105 Chip Specification_
*
* SLAVIO AUX1 @ 0x1900000
* -------------------------------------------------
* | (R) | (R) | D | (R) | E | M | T | L |
* -------------------------------------------------
* (R) - bit 7:6,4 are reserved and should be masked in s/w
* D - Floppy Density Sense (1=high density) R/O
* E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
* M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
* T - Terminal Count: sends TC pulse to 82077 floppy controller
* L - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count, */
#define AUXIO_AUX1_LED 0x01 /* System LED */
/* SLAVIO AUX2 @ 0x1910000
* -------------------------------------------------
* | (R) | (R) | D | (R) | (R) | (R) | C | F |
* -------------------------------------------------
* (R) - bits 7:6,4:2 are reserved and should be masked in s/w
* D - Power Failure Detect (1=power fail)
* C - Clear Power Failure Detect Int (1=clear)
* F - Power Off (1=power off)
*/
#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
*
* PCIO LED Auxio @ 0x726000
* -------------------------------------------------
* | 31:1 Unused | LED |
* -------------------------------------------------
* Bits 31:1 unused
* LED - System LED on front panel (0=off, 1=on)
*/
#define AUXIO_PCIO_LED 0x01 /* System LED */
/* PCIO Power Auxio @ 0x724000
* -------------------------------------------------
* | 31:2 Unused | CPO | SPO |
* -------------------------------------------------
* Bits 31:2 unused
* CPO - Courtesy Power Off (1=off)
* SPO - System Power Off (1=off)
*/
#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
#ifndef __ASSEMBLY__
extern void __iomem *auxio_register;
#define AUXIO_LTE_ON 1
#define AUXIO_LTE_OFF 0
/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
*
* on - AUXIO_LTE_ON or AUXIO_LTE_OFF
*/
extern void auxio_set_lte(int on);
#define AUXIO_LED_ON 1
#define AUXIO_LED_OFF 0
/* auxio_set_led - Set system front panel LED
*
* on - AUXIO_LED_ON or AUXIO_LED_OFF
*/
extern void auxio_set_led(int on);
#endif /* ifndef __ASSEMBLY__ */
#endif /* !(_SPARC64_AUXIO_H) */


@ -0,0 +1,31 @@
#ifndef _SPARC64_BACKOFF_H
#define _SPARC64_BACKOFF_H
#define BACKOFF_LIMIT (4 * 1024)
#ifdef CONFIG_SMP
#define BACKOFF_SETUP(reg) \
mov 1, reg
#define BACKOFF_SPIN(reg, tmp, label) \
mov reg, tmp; \
88: brnz,pt tmp, 88b; \
sub tmp, 1, tmp; \
set BACKOFF_LIMIT, tmp; \
cmp reg, tmp; \
bg,pn %xcc, label; \
nop; \
ba,pt %xcc, label; \
sllx reg, 1, reg;
#else
#define BACKOFF_SETUP(reg)
#define BACKOFF_SPIN(reg, tmp, label) \
ba,pt %xcc, label; \
nop;
#endif
#endif /* _SPARC64_BACKOFF_H */
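
The macros above implement a capped exponential backoff for contended atomic operations on SMP. A plain C analogue of the same policy, for illustration (a userspace sketch, not the sparc64 assembly the kernel's cas loops actually use):

#include <stdio.h>

#define BACKOFF_LIMIT (4 * 1024)

/* C analogue of BACKOFF_SETUP(reg): start with a one-iteration delay. */
static unsigned long backoff_setup(void)
{
	return 1;
}

/* C analogue of BACKOFF_SPIN(reg, tmp, label): spin 'backoff' times,
 * then double the delay unless it already exceeds BACKOFF_LIMIT. */
static unsigned long backoff_spin(unsigned long backoff)
{
	volatile unsigned long tmp;

	for (tmp = backoff; tmp != 0; tmp--)
		;			/* mirrors the brnz/sub delay loop */
	if (backoff <= BACKOFF_LIMIT)
		backoff <<= 1;		/* mirrors the sllx in the delay slot */
	return backoff;
}

int main(void)
{
	unsigned long b = backoff_setup();
	int i;

	for (i = 0; i < 5; i++)
		b = backoff_spin(b);
	printf("delay after 5 contended retries: %lu\n", b);	/* 32 */
	return 0;
}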

include/asm-sparc/bbc.h (new file, 225 lines)

@ -0,0 +1,225 @@
/*
* bbc.h: Defines for BootBus Controller found on UltraSPARC-III
* systems.
*
* Copyright (C) 2000 David S. Miller (davem@redhat.com)
*/
#ifndef _SPARC64_BBC_H
#define _SPARC64_BBC_H
/* Register sizes are indicated by "B" (Byte, 1-byte),
* "H" (Half-word, 2 bytes), "W" (Word, 4 bytes) or
* "Q" (Quad, 8 bytes) inside brackets.
*/
#define BBC_AID 0x00 /* [B] Agent ID */
#define BBC_DEVP 0x01 /* [B] Device Present */
#define BBC_ARB 0x02 /* [B] Arbitration */
#define BBC_QUIESCE 0x03 /* [B] Quiesce */
#define BBC_WDACTION 0x04 /* [B] Watchdog Action */
#define BBC_SPG 0x06 /* [B] Soft POR Gen */
#define BBC_SXG 0x07 /* [B] Soft XIR Gen */
#define BBC_PSRC 0x08 /* [W] POR Source */
#define BBC_XSRC 0x0c /* [B] XIR Source */
#define BBC_CSC 0x0d /* [B] Clock Synthesizers Control*/
#define BBC_ES_CTRL 0x0e /* [H] Energy Star Control */
#define BBC_ES_ACT 0x10 /* [W] E* Assert Change Time */
#define BBC_ES_DACT 0x14 /* [B] E* De-Assert Change Time */
#define BBC_ES_DABT 0x15 /* [B] E* De-Assert Bypass Time */
#define BBC_ES_ABT 0x16 /* [H] E* Assert Bypass Time */
#define BBC_ES_PST 0x18 /* [W] E* PLL Settle Time */
#define BBC_ES_FSL 0x1c /* [W] E* Frequency Switch Latency*/
#define BBC_EBUST 0x20 /* [Q] EBUS Timing */
#define BBC_JTAG_CMD 0x28 /* [W] JTAG+ Command */
#define BBC_JTAG_CTRL 0x2c /* [B] JTAG+ Control */
#define BBC_I2C_SEL 0x2d /* [B] I2C Selection */
#define BBC_I2C_0_S1 0x2e /* [B] I2C ctrlr-0 reg S1 */
#define BBC_I2C_0_S0 0x2f /* [B] I2C ctrlr-0 regs S0,S0',S2,S3*/
#define BBC_I2C_1_S1 0x30 /* [B] I2C ctrlr-1 reg S1 */
#define BBC_I2C_1_S0 0x31 /* [B] I2C ctrlr-1 regs S0,S0',S2,S3*/
#define BBC_KBD_BEEP 0x32 /* [B] Keyboard Beep */
#define BBC_KBD_BCNT 0x34 /* [W] Keyboard Beep Counter */
#define BBC_REGS_SIZE 0x40
/* There is a 2K scratch ram area at offset 0x80000 but I doubt
* we will use it for anything.
*/
/* Agent ID register. This register shows the Safari Agent ID
* for the processors. The value returned depends upon which
* cpu is reading the register.
*/
#define BBC_AID_ID 0x07 /* Safari ID */
#define BBC_AID_RESV 0xf8 /* Reserved */
/* Device Present register. One can determine which cpus are actually
* present in the machine by interrogating this register.
*/
#define BBC_DEVP_CPU0 0x01 /* Processor 0 present */
#define BBC_DEVP_CPU1 0x02 /* Processor 1 present */
#define BBC_DEVP_CPU2 0x04 /* Processor 2 present */
#define BBC_DEVP_CPU3 0x08 /* Processor 3 present */
#define BBC_DEVP_RESV 0xf0 /* Reserved */
/* Arbitration register. This register is used to block access to
* the BBC from a particular cpu.
*/
#define BBC_ARB_CPU0 0x01 /* Enable cpu 0 BBC arbitration */
#define BBC_ARB_CPU1 0x02 /* Enable cpu 1 BBC arbitration */
#define BBC_ARB_CPU2 0x04 /* Enable cpu 2 BBC arbitration */
#define BBC_ARB_CPU3 0x08 /* Enable cpu 3 BBC arbitration */
#define BBC_ARB_RESV 0xf0 /* Reserved */
/* Quiesce register. Bus and BBC segments for cpus can be disabled
* with this register, ie. for hot plugging.
*/
#define BBC_QUIESCE_S02 0x01 /* Quiesce Safari segment for cpu 0 and 2 */
#define BBC_QUIESCE_S13 0x02 /* Quiesce Safari segment for cpu 1 and 3 */
#define BBC_QUIESCE_B02 0x04 /* Quiesce BBC segment for cpu 0 and 2 */
#define BBC_QUIESCE_B13 0x08 /* Quiesce BBC segment for cpu 1 and 3 */
#define BBC_QUIESCE_FD0 0x10 /* Disable Fatal_Error[0] reporting */
#define BBC_QUIESCE_FD1 0x20 /* Disable Fatal_Error[1] reporting */
#define BBC_QUIESCE_FD2 0x40 /* Disable Fatal_Error[2] reporting */
#define BBC_QUIESCE_FD3 0x80 /* Disable Fatal_Error[3] reporting */
/* Watchdog Action register. When the watchdog device timer expires
* a line is enabled to the BBC. The action BBC takes when this line
* is asserted can be controlled by this register.
*/
#define BBC_WDACTION_RST 0x01 /* When set, watchdog causes system reset.
* When clear, BBC ignores watchdog signal.
*/
#define BBC_WDACTION_RESV 0xfe /* Reserved */
/* Soft_POR_GEN register. The POR (Power On Reset) signal may be asserted
* for specific processors or all processors via this register.
*/
#define BBC_SPG_CPU0 0x01 /* Assert POR for processor 0 */
#define BBC_SPG_CPU1 0x02 /* Assert POR for processor 1 */
#define BBC_SPG_CPU2 0x04 /* Assert POR for processor 2 */
#define BBC_SPG_CPU3 0x08 /* Assert POR for processor 3 */
#define BBC_SPG_CPUALL 0x10 /* Reset all processors and reset
* the entire system.
*/
#define BBC_SPG_RESV 0xe0 /* Reserved */
/* Soft_XIR_GEN register. The XIR (eXternally Initiated Reset) signal
* may be asserted to specific processors via this register.
*/
#define BBC_SXG_CPU0 0x01 /* Assert XIR for processor 0 */
#define BBC_SXG_CPU1 0x02 /* Assert XIR for processor 1 */
#define BBC_SXG_CPU2 0x04 /* Assert XIR for processor 2 */
#define BBC_SXG_CPU3 0x08 /* Assert XIR for processor 3 */
#define BBC_SXG_RESV 0xf0 /* Reserved */
/* POR Source register. One may identify the cause of the most recent
* reset by reading this register.
*/
#define BBC_PSRC_SPG0 0x0001 /* CPU 0 reset via BBC_SPG register */
#define BBC_PSRC_SPG1 0x0002 /* CPU 1 reset via BBC_SPG register */
#define BBC_PSRC_SPG2 0x0004 /* CPU 2 reset via BBC_SPG register */
#define BBC_PSRC_SPG3 0x0008 /* CPU 3 reset via BBC_SPG register */
#define BBC_PSRC_SPGSYS 0x0010 /* System reset via BBC_SPG register */
#define BBC_PSRC_JTAG 0x0020 /* System reset via JTAG+ */
#define BBC_PSRC_BUTTON 0x0040 /* System reset via push-button dongle */
#define BBC_PSRC_PWRUP 0x0080 /* System reset via power-up */
#define BBC_PSRC_FE0 0x0100 /* CPU 0 reported Fatal_Error */
#define BBC_PSRC_FE1 0x0200 /* CPU 1 reported Fatal_Error */
#define BBC_PSRC_FE2 0x0400 /* CPU 2 reported Fatal_Error */
#define BBC_PSRC_FE3 0x0800 /* CPU 3 reported Fatal_Error */
#define BBC_PSRC_FE4 0x1000 /* Schizo reported Fatal_Error */
#define BBC_PSRC_FE5 0x2000 /* Safari device 5 reported Fatal_Error */
#define BBC_PSRC_FE6 0x4000 /* CPMS reported Fatal_Error */
#define BBC_PSRC_SYNTH 0x8000 /* System reset when on-board clock synthesizers
* were updated.
*/
#define BBC_PSRC_WDT 0x10000 /* System reset via Super I/O watchdog */
#define BBC_PSRC_RSC 0x20000 /* System reset via RSC remote monitoring
* device
*/
/* XIR Source register. The source of an XIR event sent to a processor may
* be determined via this register.
*/
#define BBC_XSRC_SXG0 0x01 /* CPU 0 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_SXG1 0x02 /* CPU 1 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_SXG2 0x04 /* CPU 2 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_SXG3 0x08 /* CPU 3 received XIR via Soft_XIR_GEN reg */
#define BBC_XSRC_JTAG 0x10 /* All CPUs received XIR via JTAG+ */
#define BBC_XSRC_W_OR_B 0x20 /* All CPUs received XIR either because:
* a) Super I/O watchdog fired, or
* b) XIR push button was activated
*/
#define BBC_XSRC_RESV 0xc0 /* Reserved */
/* Clock Synthesizers Control register. This register provides the bit-bang
* programming interface to the two clock synthesizers of the machine.
*/
#define BBC_CSC_SLOAD 0x01 /* Directly connected to S_LOAD pins */
#define BBC_CSC_SDATA 0x02 /* Directly connected to S_DATA pins */
#define BBC_CSC_SCLOCK 0x04 /* Directly connected to S_CLOCK pins */
#define BBC_CSC_RESV 0x78 /* Reserved */
#define BBC_CSC_RST 0x80 /* Generate system reset when S_LOAD==1 */
/* Energy Star Control register. This register is used to generate the
* clock frequency change trigger to the main system devices (Schizo and
* the processors). The transition occurs when bits in this register
* go from 0 to 1; only one bit may be set at a time, else no action
* occurs. Basically the sequence of events is:
* a) Choose new frequency: full, 1/2 or 1/32
* b) Program this desired frequency into the cpus and Schizo.
* c) Set the same value in this register.
* d) 16 system clocks later, clear this register.
*/
#define BBC_ES_CTRL_1_1 0x01 /* Full frequency */
#define BBC_ES_CTRL_1_2 0x02 /* 1/2 frequency */
#define BBC_ES_CTRL_1_32 0x20 /* 1/32 frequency */
#define BBC_ES_RESV 0xdc /* Reserved */
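
Steps (a)-(d) boil down to a single 0->1 write of one BBC_ES_CTRL_1_* bit followed, at least 16 system clocks later, by a write of zero. A minimal sketch of the trigger, assuming `bbc_regs` is an ioremap()'d pointer to the BBC register block and that the CPUs and Schizo were already programmed for the new speed (an illustration only, not the kernel's BBC/Energy Star driver):

#include <linux/io.h>
#include <linux/delay.h>

/* Trigger the switch to 1/2 speed, per steps (c) and (d) above. */
static void bbc_trigger_half_speed(void __iomem *bbc_regs)
{
	/* (c) set exactly one BBC_ES_CTRL_1_* bit; the 0->1 edge triggers. */
	writew(BBC_ES_CTRL_1_2, bbc_regs + BBC_ES_CTRL);

	/* (d) wait comfortably more than 16 system clocks, then clear. */
	udelay(1);
	writew(0, bbc_regs + BBC_ES_CTRL);
}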
/* Energy Star Assert Change Time register. This determines the number
* of BBC clock cycles (which is half the system frequency) between
* the detection of FREEZE_ACK being asserted and the assertion of
* the CLK_CHANGE_L[2:0] signals.
*/
#define BBC_ES_ACT_VAL 0xff
/* Energy Star Assert Bypass Time register. This determines the number
* of BBC clock cycles (which is half the system frequency) between
* the assertion of the CLK_CHANGE_L[2:0] signals and the assertion of
* the ESTAR_PLL_BYPASS signal.
*/
#define BBC_ES_ABT_VAL 0xffff
/* Energy Star PLL Settle Time register. This determines the number of
* BBC clock cycles (which is half the system frequency) between the
* de-assertion of CLK_CHANGE_L[2:0] and the de-assertion of the FREEZE_L
* signal.
*/
#define BBC_ES_PST_VAL 0xffffffff
/* Energy Star Frequency Switch Latency register. This is the number of
* BBC clocks between the de-assertion of CLK_CHANGE_L[2:0] and the first
* edge of the Safari clock at the new frequency.
*/
#define BBC_ES_FSL_VAL 0xffffffff
/* Keyboard Beep control register. This is a simple enabler for the audio
* beep sound.
*/
#define BBC_KBD_BEEP_ENABLE 0x01 /* Enable beep */
#define BBC_KBD_BEEP_RESV 0xfe /* Reserved */
/* Keyboard Beep Counter register. There is a free-running counter inside
* the BBC which runs at half the system clock. The bit set in this register
* determines when the audio sound is generated. So for example if bit
* 10 is set, the audio beep will oscillate at 1/(2**12). The keyboard beep
* generator automatically selects a different bit to use if the system clock
* is changed via Energy Star.
*/
#define BBC_KBD_BCNT_BITS 0x0007fc00
#define BBC_KBC_BCNT_RESV 0xfff803ff
#endif /* _SPARC64_BBC_H */


@ -1,111 +1,8 @@
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#ifndef ___ASM_SPARC_BITOPS_H
#define ___ASM_SPARC_BITOPS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/bitops_64.h>
#else
#include <asm-sparc/bitops_32.h>
#endif
#endif
extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */


@ -0,0 +1,111 @@
/*
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright 2001 Anton Blanchard (anton@samba.org)
*/
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H
#include <linux/compiler.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
/*
* Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
* is in the highest of the four bytes and bit '31' is the high bit
* within the first byte. Sparc is BIG-Endian. Unless noted otherwise
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___set_bit(ADDR, mask) != 0;
}
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___set_bit(ADDR, mask);
}
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___clear_bit(ADDR, mask) != 0;
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___clear_bit(ADDR, mask);
}
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
return ___change_bit(ADDR, mask) != 0;
}
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *ADDR, mask;
ADDR = ((unsigned long *) addr) + (nr >> 5);
mask = 1 << (nr & 31);
(void) ___change_bit(ADDR, mask);
}
#include <asm-generic/bitops/non-atomic.h>
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC_BITOPS_H) */
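
All of the helpers above split 'nr' into a word index and a bit mask in the same way. A standalone, non-atomic illustration of that indexing (plain C; the real helpers go through the ___set_bit() assembly so the update is atomic):

#include <stdio.h>

/* Non-atomic sketch of the word/mask split used by the helpers above. */
static void sparc32_set_bit(unsigned long nr, unsigned long *addr)
{
	unsigned long *word = addr + (nr >> 5);	/* which word (32-bit on sparc32) */
	unsigned long mask  = 1UL << (nr & 31);	/* which bit inside it */

	*word |= mask;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	sparc32_set_bit(35, bitmap);
	printf("word 1 = %#lx\n", bitmap[1]);	/* prints 0x8 (bit 3) */
	return 0;
}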


@ -0,0 +1,107 @@
/*
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _SPARC64_BITOPS_H
#define _SPARC64_BITOPS_H
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/byteorder.h>
extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
extern void set_bit(unsigned long nr, volatile unsigned long *addr);
extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
extern void change_bit(unsigned long nr, volatile unsigned long *addr);
#include <asm-generic/bitops/non-atomic.h>
#ifdef CONFIG_SMP
#define smp_mb__before_clear_bit() membar_storeload_loadload()
#define smp_mb__after_clear_bit() membar_storeload_storestore()
#else
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#endif
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
/*
* hweightN: returns the hamming weight (i.e. the number
* of bits set) of a N-bit word
*/
#ifdef ULTRA_HAS_POPULATION_COUNT
static inline unsigned int hweight64(unsigned long w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
return res;
}
static inline unsigned int hweight32(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
return res;
}
static inline unsigned int hweight16(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
return res;
}
static inline unsigned int hweight8(unsigned int w)
{
unsigned int res;
__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
return res;
}
#else
#include <asm-generic/bitops/hweight.h>
#endif
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* defined(_SPARC64_BITOPS_H) */


@ -1,85 +1,8 @@
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#endif /* _SPARC_CACHEFLUSH_H */
#ifndef ___ASM_SPARC_CACHEFLUSH_H
#define ___ASM_SPARC_CACHEFLUSH_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/cacheflush_64.h>
#else
#include <asm-sparc/cacheflush_32.h>
#endif
#endif


@ -0,0 +1,85 @@
#ifndef _SPARC_CACHEFLUSH_H
#define _SPARC_CACHEFLUSH_H
#include <linux/mm.h> /* Common for other includes */
// #include <linux/kernel.h> from pgalloc.h
// #include <linux/sched.h> from pgalloc.h
// #include <asm/page.h>
#include <asm/btfixup.h>
/*
* Fine grained cache flushing.
*/
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
extern void smp_flush_cache_all(void);
extern void smp_flush_cache_mm(struct mm_struct *mm);
extern void smp_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void smp_flush_page_to_ram(unsigned long page);
extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
#endif /* CONFIG_SMP */
BTFIXUPDEF_CALL(void, flush_cache_all, void)
BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
extern void sparc_flush_page_to_ram(struct page *page);
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
#endif /* _SPARC_CACHEFLUSH_H */


@ -0,0 +1,76 @@
#ifndef _SPARC64_CACHEFLUSH_H
#define _SPARC64_CACHEFLUSH_H
#include <asm/page.h>
#ifndef __ASSEMBLY__
#include <linux/mm.h>
/* Cache flush operations. */
/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page, pfn) \
flush_cache_mm((vma)->vm_mm)
/*
* On spitfire, the icache doesn't snoop local stores and we don't
* use block commit stores (which invalidate icache lines) during
* module load, so we need this.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_icache_page(unsigned long);
extern void __flush_dcache_page(void *addr, int flush_icache);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
extern void __flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
unsigned long len, int write);
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, src, len, 0); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
} while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_CACHEFLUSH_H */
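
A usage sketch of the spitfire note above: after storing new instructions (code patching, module load fix-ups), the writer must call flush_icache_range() so the non-snooping I-cache sees them. The include path and helper name here are assumptions for illustration, not a quote of an actual call site:

#include <linux/types.h>
#include <asm/cacheflush.h>

static void patch_one_insn(u32 *addr, u32 insn)
{
	*addr = insn;		/* D-side store; the I-cache does not snoop it */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(insn));
}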

include/asm-sparc/chafsr.h (new file, 241 lines)

@ -0,0 +1,241 @@
#ifndef _SPARC64_CHAFSR_H
#define _SPARC64_CHAFSR_H
/* Cheetah Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
/* Comments indicate which processor variants on which the bit definition
* is valid. Codes are:
* ch --> cheetah
* ch+ --> cheetah plus
* jp --> jalapeno
*/
/* All bits of this register except M_SYNDROME and E_SYNDROME are
* read, write 1 to clear. M_SYNDROME and E_SYNDROME are read-only.
*/
/* Software bit set by linux trap handlers to indicate that the trap was
* signalled at %tl >= 1.
*/
#define CHAFSR_TL1 (1UL << 63UL) /* n/a */
/* Unmapped error from system bus for prefetch queue or
* store queue read operation
*/
#define CHPAFSR_DTO (1UL << 59UL) /* ch+ */
/* Bus error from system bus for prefetch queue or store queue
* read operation
*/
#define CHPAFSR_DBERR (1UL << 58UL) /* ch+ */
/* Hardware corrected E-cache Tag ECC error */
#define CHPAFSR_THCE (1UL << 57UL) /* ch+ */
/* System interface protocol error, hw timeout caused */
#define JPAFSR_JETO (1UL << 57UL) /* jp */
/* SW handled correctable E-cache Tag ECC error */
#define CHPAFSR_TSCE (1UL << 56UL) /* ch+ */
/* Parity error on system snoop results */
#define JPAFSR_SCE (1UL << 56UL) /* jp */
/* Uncorrectable E-cache Tag ECC error */
#define CHPAFSR_TUE (1UL << 55UL) /* ch+ */
/* System interface protocol error, illegal command detected */
#define JPAFSR_JEIC (1UL << 55UL) /* jp */
/* Uncorrectable system bus data ECC error due to prefetch
* or store fill request
*/
#define CHPAFSR_DUE (1UL << 54UL) /* ch+ */
/* System interface protocol error, illegal ADTYPE detected */
#define JPAFSR_JEIT (1UL << 54UL) /* jp */
/* Multiple errors of the same type have occurred. This bit is set when
* an uncorrectable error or a SW correctable error occurs and the status
* bit to report that error is already set. When multiple errors of
* different types occur, they are indicated by setting multiple status bits.
*
* This bit is not set if multiple HW corrected errors with the same
* status bit occur, only uncorrectable and SW correctable ones have
* this behavior.
*
* This bit is not set when multiple ECC errors happen within a single
* 64-byte system bus transaction. Only the first ECC error in a 16-byte
* subunit will be logged. All errors in subsequent 16-byte subunits
* from the same 64-byte transaction are ignored.
*/
#define CHAFSR_ME (1UL << 53UL) /* ch,ch+,jp */
/* Privileged state error has occurred. This is a capture of PSTATE.PRIV
* at the time the error is detected.
*/
#define CHAFSR_PRIV (1UL << 52UL) /* ch,ch+,jp */
/* The following bits 51 (CHAFSR_PERR) to 33 (CHAFSR_CE) are sticky error
* bits and record the most recently detected errors. Bits accumulate
* errors that have been detected since the last write to clear the bit.
*/
/* System interface protocol error. The processor asserts its ERROR
* pin when this event occurs and it also logs a specific cause code
* into a JTAG scannable flop.
*/
#define CHAFSR_PERR (1UL << 51UL) /* ch,ch+,jp */
/* Internal processor error. The processor asserts its ERROR
* pin when this event occurs and it also logs a specific cause code
* into a JTAG scannable flop.
*/
#define CHAFSR_IERR (1UL << 50UL) /* ch,ch+,jp */
/* System request parity error on incoming address */
#define CHAFSR_ISAP (1UL << 49UL) /* ch,ch+,jp */
/* HW Corrected system bus MTAG ECC error */
#define CHAFSR_EMC (1UL << 48UL) /* ch,ch+ */
/* Parity error on L2 cache tag SRAM */
#define JPAFSR_ETP (1UL << 48UL) /* jp */
/* Uncorrectable system bus MTAG ECC error */
#define CHAFSR_EMU (1UL << 47UL) /* ch,ch+ */
/* Out of range memory error has occurred */
#define JPAFSR_OM (1UL << 47UL) /* jp */
/* HW Corrected system bus data ECC error for read of interrupt vector */
#define CHAFSR_IVC (1UL << 46UL) /* ch,ch+ */
/* Error due to unsupported store */
#define JPAFSR_UMS (1UL << 46UL) /* jp */
/* Uncorrectable system bus data ECC error for read of interrupt vector */
#define CHAFSR_IVU (1UL << 45UL) /* ch,ch+,jp */
/* Unmapped error from system bus */
#define CHAFSR_TO (1UL << 44UL) /* ch,ch+,jp */
/* Bus error response from system bus */
#define CHAFSR_BERR (1UL << 43UL) /* ch,ch+,jp */
/* SW Correctable E-cache ECC error for instruction fetch or data access
* other than block load.
*/
#define CHAFSR_UCC (1UL << 42UL) /* ch,ch+,jp */
/* Uncorrectable E-cache ECC error for instruction fetch or data access
* other than block load.
*/
#define CHAFSR_UCU (1UL << 41UL) /* ch,ch+,jp */
/* Copyout HW Corrected ECC error */
#define CHAFSR_CPC (1UL << 40UL) /* ch,ch+,jp */
/* Copyout Uncorrectable ECC error */
#define CHAFSR_CPU (1UL << 39UL) /* ch,ch+,jp */
/* HW Corrected ECC error from E-cache for writeback */
#define CHAFSR_WDC (1UL << 38UL) /* ch,ch+,jp */
/* Uncorrectable ECC error from E-cache for writeback */
#define CHAFSR_WDU (1UL << 37UL) /* ch,ch+,jp */
/* HW Corrected ECC error from E-cache for store merge or block load */
#define CHAFSR_EDC (1UL << 36UL) /* ch,ch+,jp */
/* Uncorrectable ECC error from E-cache for store merge or block load */
#define CHAFSR_EDU (1UL << 35UL) /* ch,ch+,jp */
/* Uncorrectable system bus data ECC error for read of memory or I/O */
#define CHAFSR_UE (1UL << 34UL) /* ch,ch+,jp */
/* HW Corrected system bus data ECC error for read of memory or I/O */
#define CHAFSR_CE (1UL << 33UL) /* ch,ch+,jp */
/* Uncorrectable ECC error from remote cache/memory */
#define JPAFSR_RUE (1UL << 32UL) /* jp */
/* Correctable ECC error from remote cache/memory */
#define JPAFSR_RCE (1UL << 31UL) /* jp */
/* JBUS parity error on returned read data */
#define JPAFSR_BP (1UL << 30UL) /* jp */
/* JBUS parity error on data for writeback or block store */
#define JPAFSR_WBP (1UL << 29UL) /* jp */
/* Foreign read to DRAM incurring correctable ECC error */
#define JPAFSR_FRC (1UL << 28UL) /* jp */
/* Foreign read to DRAM incurring uncorrectable ECC error */
#define JPAFSR_FRU (1UL << 27UL) /* jp */
#define CHAFSR_ERRORS (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP | CHAFSR_EMC | \
CHAFSR_EMU | CHAFSR_IVC | CHAFSR_IVU | CHAFSR_TO | \
CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | CHAFSR_CPC | \
CHAFSR_CPU | CHAFSR_WDC | CHAFSR_WDU | CHAFSR_EDC | \
CHAFSR_EDU | CHAFSR_UE | CHAFSR_CE)
#define CHPAFSR_ERRORS (CHPAFSR_DTO | CHPAFSR_DBERR | CHPAFSR_THCE | \
CHPAFSR_TSCE | CHPAFSR_TUE | CHPAFSR_DUE | \
CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP | CHAFSR_EMC | \
CHAFSR_EMU | CHAFSR_IVC | CHAFSR_IVU | CHAFSR_TO | \
CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | CHAFSR_CPC | \
CHAFSR_CPU | CHAFSR_WDC | CHAFSR_WDU | CHAFSR_EDC | \
CHAFSR_EDU | CHAFSR_UE | CHAFSR_CE)
#define JPAFSR_ERRORS (JPAFSR_JETO | JPAFSR_SCE | JPAFSR_JEIC | \
JPAFSR_JEIT | CHAFSR_PERR | CHAFSR_IERR | \
CHAFSR_ISAP | JPAFSR_ETP | JPAFSR_OM | \
JPAFSR_UMS | CHAFSR_IVU | CHAFSR_TO | \
CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | \
CHAFSR_CPC | CHAFSR_CPU | CHAFSR_WDC | \
CHAFSR_WDU | CHAFSR_EDC | CHAFSR_EDU | \
CHAFSR_UE | CHAFSR_CE | JPAFSR_RUE | \
JPAFSR_RCE | JPAFSR_BP | JPAFSR_WBP | \
JPAFSR_FRC | JPAFSR_FRU)
/* Active JBUS request signal when error occurred */
#define JPAFSR_JBREQ (0x7UL << 24UL) /* jp */
#define JPAFSR_JBREQ_SHIFT 24UL
/* L2 cache way information */
#define JPAFSR_ETW (0x3UL << 22UL) /* jp */
#define JPAFSR_ETW_SHIFT 22UL
/* System bus MTAG ECC syndrome. This field captures the status of the
* first occurrence of the highest-priority error according to the M_SYND
* overwrite policy. After the AFSR sticky bit, corresponding to the error
* for which the M_SYND is reported, is cleared, the contents of the M_SYND
* field will be unchanged but will be unfrozen for further error capture.
*/
#define CHAFSR_M_SYNDROME (0xfUL << 16UL) /* ch,ch+,jp */
#define CHAFSR_M_SYNDROME_SHIFT 16UL
/* Agent ID of the foreign device causing the UE/CE errors */
#define JPAFSR_AID (0x1fUL << 9UL) /* jp */
#define JPAFSR_AID_SHIFT 9UL
/* System bus or E-cache data ECC syndrome. This field captures the status
* of the first occurrence of the highest-priority error according to the
* E_SYND overwrite policy. After the AFSR sticky bit, corresponding to the
* error for which the E_SYND is reported, is cleared, the contents of the E_SYND
* field will be unchanged but will be unfrozen for further error capture.
*/
#define CHAFSR_E_SYNDROME (0x1ffUL << 0UL) /* ch,ch+,jp */
#define CHAFSR_E_SYNDROME_SHIFT 0UL
/* The AFSR must be explicitly cleared by software, it is not cleared automatically
* by a read. Writes to bits <51:33> with bits set will clear the corresponding
* bits in the AFSR. Bits associated with disrupting traps must be cleared before
* interrupts are re-enabled to prevent multiple traps for the same error. I.e.
* PSTATE.IE and AFSR bits control delivery of disrupting traps.
*
* Since there is only one AFAR, when multiple events have been logged by the
* bits in the AFSR, at most one of these events will have its status captured
* in the AFAR. The highest priority of those event bits will get AFAR logging.
* The AFAR will be unlocked and available to capture the address of another event
* as soon as the one bit in AFSR that corresponds to the event logged in AFAR is
* cleared. For example, if AFSR.CE is detected, then AFSR.UE (which overwrites
* the AFAR), and AFSR.UE is cleared but not AFSR.CE, then the AFAR will be unlocked
* and ready for another event, even though AFSR.CE is still set. The same rules
* also apply to the M_SYNDROME and E_SYNDROME fields of the AFSR.
*/
#endif /* _SPARC64_CHAFSR_H */
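
Since the sticky bits are write-1-to-clear, an error handler clears them by storing the same mask back through the AFSR ASI noted at the top of the file (ASI 0x4c, VA 0). A minimal sparc64 inline-assembly sketch of that store, for illustration only (the real work happens in the kernel's cheetah trap handlers):

/* Write 1s back to clear the given sticky AFSR bits (sketch). */
static inline void chafsr_clear(unsigned long bits)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (bits), "i" (0x4c /* AFSR ASI */)
			     : "memory");
}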


@ -1,241 +1,8 @@
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC_CHECKSUM_H) */
#ifndef ___ASM_SPARC_CHECKSUM_H
#define ___ASM_SPARC_CHECKSUM_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/checksum_64.h>
#else
#include <asm-sparc/checksum_32.h>
#endif
#endif


@ -0,0 +1,241 @@
#ifndef __SPARC_CHECKSUM_H
#define __SPARC_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
__asm__ __volatile__ (
"call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7",
"memory", "cc");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err)
{
register unsigned long ret asm("o0") = (unsigned long)src;
register char *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,2\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
static inline __wsum
csum_partial_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err)
{
if (!access_ok (VERIFY_WRITE, dst, len)) {
*err = -EFAULT;
return sum;
} else {
register unsigned long ret asm("o0") = (unsigned long)src;
register char __user *d asm("o1") = dst;
register int l asm("g1") = len;
register __wsum s asm("g7") = sum;
__asm__ __volatile__ (
".section __ex_table,#alloc\n\t"
".align 4\n\t"
".word 1f,1\n\t"
".previous\n"
"1:\n\t"
"call __csum_partial_copy_sparc_generic\n\t"
" st %8, [%%sp + 64]\n"
: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
: "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5",
"cc", "memory");
return (__force __wsum)ret;
}
}
#define HAVE_CSUM_COPY_USER
#define csum_and_copy_to_user csum_partial_copy_to_user
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__sum16 sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
"ld\t[%1 + 0x00], %0\n\t"
"ld\t[%1 + 0x04], %%g2\n\t"
"ld\t[%1 + 0x08], %%g3\n\t"
"addcc\t%%g2, %0, %0\n\t"
"addxcc\t%%g3, %0, %0\n\t"
"ld\t[%1 + 0x0c], %%g2\n\t"
"ld\t[%1 + 0x10], %%g3\n\t"
"addxcc\t%%g2, %0, %0\n\t"
"addx\t%0, %%g0, %0\n"
"1:\taddcc\t%%g3, %0, %0\n\t"
"add\t%1, 4, %1\n\t"
"addxcc\t%0, %%g0, %0\n\t"
"subcc\t%%g4, 1, %%g4\n\t"
"be,a\t2f\n\t"
"sll\t%0, 16, %%g2\n\t"
"b\t1b\n\t"
"ld\t[%1 + 0x10], %%g3\n"
"2:\taddcc\t%0, %%g2, %%g2\n\t"
"srl\t%%g2, 16, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
"xnor\t%%g0, %0, %0"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g4", "cc", "memory");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__("addcc\t%0, %1, %1\n\t"
"srl\t%1, 16, %1\n\t"
"addx\t%1, %%g0, %1\n\t"
"xnor\t%%g0, %1, %0"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
"addxcc\t%2, %0, %0\n\t"
"addxcc\t%3, %0, %0\n\t"
"addx\t%0, %%g0, %0\n\t"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum),
"1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
"addcc %3, %4, %%g4\n\t"
"addxcc %5, %%g4, %%g4\n\t"
"ld [%2 + 0x0c], %%g2\n\t"
"ld [%2 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%2 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%2 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x0c], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x08], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"ld [%1 + 0x04], %%g2\n\t"
"addxcc %%g3, %%g4, %%g4\n\t"
"ld [%1 + 0x00], %%g3\n\t"
"addxcc %%g2, %%g4, %%g4\n\t"
"addxcc %%g3, %%g4, %0\n\t"
"addx 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g4", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC_CHECKSUM_H) */


@ -0,0 +1,167 @@
#ifndef __SPARC64_CHECKSUM_H
#define __SPARC64_CHECKSUM_H
/* checksum.h: IP/UDP/TCP checksum routines on the V9.
*
* Copyright(C) 1995 Linus Torvalds
* Copyright(C) 1995 Miguel de Icaza
* Copyright(C) 1996 David S. Miller
* Copyright(C) 1996 Eddie C. Dost
* Copyright(C) 1997 Jakub Jelinek
*
* derived from:
* Alpha checksum c-code
* ix86 inline assembly
* RFC1071 Computing the Internet Checksum
*/
#include <linux/in6.h>
#include <asm/uaccess.h>
/* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
extern __wsum csum_partial(const void * buff, int len, __wsum sum);
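
For orientation, csum_fold(csum_partial(buf, len, 0)) computes the standard RFC 1071 Internet checksum of the buffer. A portable (and much slower) userspace illustration of the same result on a big-endian layout such as sparc — an analogue for reference, not the kernel's optimized assembly:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t rfc1071_checksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {			/* sum 16-bit big-endian words */
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* pad an odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;			/* one's complement */
}

int main(void)
{
	static const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c };

	printf("checksum = %#06x\n", rfc1071_checksum(pkt, sizeof(pkt)));	/* 0xbae3 */
	return 0;
}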
/* the same as csum_partial, but copies from user space while it
* checksums
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
extern long __csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum);
static inline __wsum
csum_partial_copy_from_user(const void __user *src,
void *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_from_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
extern long __csum_partial_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum);
static inline __wsum
csum_and_copy_to_user(const void *src,
void __user *dst, int len,
__wsum sum, int *err)
{
long ret = __csum_partial_copy_to_user(src, dst, len, sum);
if (ret < 0)
*err = -EFAULT;
return (__force __wsum) ret;
}
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */
static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
__asm__ __volatile__(
" addcc %0, %1, %1\n"
" srl %1, 16, %1\n"
" addc %1, %%g0, %1\n"
" xnor %%g0, %1, %0\n"
: "=&r" (sum), "=r" (tmp)
: "0" (sum), "1" ((__force u32)sum<<16)
: "cc");
return (__force __sum16)sum;
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned int len,
unsigned short proto,
__wsum sum)
{
__asm__ __volatile__(
" addcc %1, %0, %0\n"
" addccc %2, %0, %0\n"
" addccc %3, %0, %0\n"
" addc %0, %%g0, %0\n"
: "=r" (sum), "=r" (saddr)
: "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
: "cc");
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
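
A typical usage sketch combining the two helpers above to checksum a UDP packet. Assumptions: kernel context with <linux/in.h>, <linux/udp.h> and <net/checksum.h> available, and the caller has zeroed uh->check; this mirrors common call sites rather than quoting a specific one:

#include <linux/in.h>
#include <linux/udp.h>
#include <net/checksum.h>

/* 'uh' points at the UDP header followed by its payload; 'len' is the
 * UDP length in bytes.  uh->check must be zero while summing. */
static __sum16 udp_checksum(__be32 saddr, __be32 daddr,
			    struct udphdr *uh, unsigned int len)
{
	__wsum csum = csum_partial(uh, len, 0);

	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
}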
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__ __volatile__ (
" addcc %3, %4, %%g7\n"
" addccc %5, %%g7, %%g7\n"
" lduw [%2 + 0x0c], %%g2\n"
" lduw [%2 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%2 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%2 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x0c], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x08], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" lduw [%1 + 0x04], %%g2\n"
" addccc %%g3, %%g7, %%g7\n"
" lduw [%1 + 0x00], %%g3\n"
" addccc %%g2, %%g7, %%g7\n"
" addccc %%g3, %%g7, %0\n"
" addc 0, %0, %0\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr), "r"(htonl(len)),
"r"(htonl(proto)), "r"(sum)
: "g2", "g3", "g7", "cc");
return csum_fold(sum);
}
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#endif /* !(__SPARC64_CHECKSUM_H) */

include/asm-sparc/chmctrl.h (new file, 183 lines)

@ -0,0 +1,183 @@
#ifndef _SPARC64_CHMCTRL_H
#define _SPARC64_CHMCTRL_H
/* Cheetah memory controller programmable registers. */
#define CHMCTRL_TCTRL1 0x00 /* Memory Timing Control I */
#define CHMCTRL_TCTRL2 0x08 /* Memory Timing Control II */
#define CHMCTRL_TCTRL3 0x38 /* Memory Timing Control III */
#define CHMCTRL_TCTRL4 0x40 /* Memory Timing Control IV */
#define CHMCTRL_DECODE1 0x10 /* Memory Address Decode I */
#define CHMCTRL_DECODE2 0x18 /* Memory Address Decode II */
#define CHMCTRL_DECODE3 0x20 /* Memory Address Decode III */
#define CHMCTRL_DECODE4 0x28 /* Memory Address Decode IV */
#define CHMCTRL_MACTRL 0x30 /* Memory Address Control */
/* Memory Timing Control I */
#define TCTRL1_SDRAMCTL_DLY 0xf000000000000000UL
#define TCTRL1_SDRAMCTL_DLY_SHIFT 60
#define TCTRL1_SDRAMCLK_DLY 0x0e00000000000000UL
#define TCTRL1_SDRAMCLK_DLY_SHIFT 57
#define TCTRL1_R 0x0100000000000000UL
#define TCTRL1_R_SHIFT 56
#define TCTRL1_AUTORFR_CYCLE 0x00fe000000000000UL
#define TCTRL1_AUTORFR_CYCLE_SHIFT 49
#define TCTRL1_RD_WAIT 0x0001f00000000000UL
#define TCTRL1_RD_WAIT_SHIFT 44
#define TCTRL1_PC_CYCLE 0x00000fc000000000UL
#define TCTRL1_PC_CYCLE_SHIFT 38
#define TCTRL1_WR_MORE_RAS_PW 0x0000003f00000000UL
#define TCTRL1_WR_MORE_RAS_PW_SHIFT 32
#define TCTRL1_RD_MORE_RAW_PW 0x00000000fc000000UL
#define TCTRL1_RD_MORE_RAS_PW_SHIFT 26
#define TCTRL1_ACT_WR_DLY 0x0000000003f00000UL
#define TCTRL1_ACT_WR_DLY_SHIFT 20
#define TCTRL1_ACT_RD_DLY 0x00000000000fc000UL
#define TCTRL1_ACT_RD_DLY_SHIFT 14
#define TCTRL1_BANK_PRESENT 0x0000000000003000UL
#define TCTRL1_BANK_PRESENT_SHIFT 12
#define TCTRL1_RFR_INT 0x0000000000000ff8UL
#define TCTRL1_RFR_INT_SHIFT 3
#define TCTRL1_SET_MODE_REG 0x0000000000000004UL
#define TCTRL1_SET_MODE_REG_SHIFT 2
#define TCTRL1_RFR_ENABLE 0x0000000000000002UL
#define TCTRL1_RFR_ENABLE_SHIFT 1
#define TCTRL1_PRECHG_ALL 0x0000000000000001UL
#define TCTRL1_PRECHG_ALL_SHIFT 0
/* Memory Timing Control II */
#define TCTRL2_WR_MSEL_DLY 0xfc00000000000000UL
#define TCTRL2_WR_MSEL_DLY_SHIFT 58
#define TCTRL2_RD_MSEL_DLY 0x03f0000000000000UL
#define TCTRL2_RD_MSEL_DLY_SHIFT 52
#define TCTRL2_WRDATA_THLD 0x000c000000000000UL
#define TCTRL2_WRDATA_THLD_SHIFT 50
#define TCTRL2_RDWR_RD_TI_DLY 0x0003f00000000000UL
#define TCTRL2_RDWR_RD_TI_DLY_SHIFT 44
#define TCTRL2_AUTOPRECHG_ENBL 0x0000080000000000UL
#define TCTRL2_AUTOPRECHG_ENBL_SHIFT 43
#define TCTRL2_RDWR_PI_MORE_DLY 0x000007c000000000UL
#define TCTRL2_RDWR_PI_MORE_DLY_SHIFT 38
#define TCTRL2_RDWR_1_DLY 0x0000003f00000000UL
#define TCTRL2_RDWR_1_DLY_SHIFT 32
#define TCTRL2_WRWR_PI_MORE_DLY 0x00000000f8000000UL
#define TCTRL2_WRWR_PI_MORE_DLY_SHIFT 27
#define TCTRL2_WRWR_1_DLY 0x0000000007e00000UL
#define TCTRL2_WRWR_1_DLY_SHIFT 21
#define TCTRL2_RDWR_RD_PI_MORE_DLY 0x00000000001f0000UL
#define TCTRL2_RDWR_RD_PI_MORE_DLY_SHIFT 16
#define TCTRL2_R 0x0000000000008000UL
#define TCTRL2_R_SHIFT 15
#define TCTRL2_SDRAM_MODE_REG_DATA 0x0000000000007fffUL
#define TCTRL2_SDRAM_MODE_REG_DATA_SHIFT 0
/* Memory Timing Control III */
#define TCTRL3_SDRAM_CTL_DLY 0xf000000000000000UL
#define TCTRL3_SDRAM_CTL_DLY_SHIFT 60
#define TCTRL3_SDRAM_CLK_DLY 0x0e00000000000000UL
#define TCTRL3_SDRAM_CLK_DLY_SHIFT 57
#define TCTRL3_R 0x0100000000000000UL
#define TCTRL3_R_SHIFT 56
#define TCTRL3_AUTO_RFR_CYCLE 0x00fe000000000000UL
#define TCTRL3_AUTO_RFR_CYCLE_SHIFT 49
#define TCTRL3_RD_WAIT 0x0001f00000000000UL
#define TCTRL3_RD_WAIT_SHIFT 44
#define TCTRL3_PC_CYCLE 0x00000fc000000000UL
#define TCTRL3_PC_CYCLE_SHIFT 38
#define TCTRL3_WR_MORE_RAW_PW 0x0000003f00000000UL
#define TCTRL3_WR_MORE_RAW_PW_SHIFT 32
#define TCTRL3_RD_MORE_RAW_PW 0x00000000fc000000UL
#define TCTRL3_RD_MORE_RAW_PW_SHIFT 26
#define TCTRL3_ACT_WR_DLY 0x0000000003f00000UL
#define TCTRL3_ACT_WR_DLY_SHIFT 20
#define TCTRL3_ACT_RD_DLY 0x00000000000fc000UL
#define TCTRL3_ACT_RD_DLY_SHIFT 14
#define TCTRL3_BANK_PRESENT 0x0000000000003000UL
#define TCTRL3_BANK_PRESENT_SHIFT 12
#define TCTRL3_RFR_INT 0x0000000000000ff8UL
#define TCTRL3_RFR_INT_SHIFT 3
#define TCTRL3_SET_MODE_REG 0x0000000000000004UL
#define TCTRL3_SET_MODE_REG_SHIFT 2
#define TCTRL3_RFR_ENABLE 0x0000000000000002UL
#define TCTRL3_RFR_ENABLE_SHIFT 1
#define TCTRL3_PRECHG_ALL 0x0000000000000001UL
#define TCTRL3_PRECHG_ALL_SHIFT 0
/* Memory Timing Control IV */
#define TCTRL4_WR_MSEL_DLY 0xfc00000000000000UL
#define TCTRL4_WR_MSEL_DLY_SHIFT 58
#define TCTRL4_RD_MSEL_DLY 0x03f0000000000000UL
#define TCTRL4_RD_MSEL_DLY_SHIFT 52
#define TCTRL4_WRDATA_THLD 0x000c000000000000UL
#define TCTRL4_WRDATA_THLD_SHIFT 50
#define TCTRL4_RDWR_RD_RI_DLY 0x0003f00000000000UL
#define TCTRL4_RDWR_RD_RI_DLY_SHIFT 44
#define TCTRL4_AUTO_PRECHG_ENBL 0x0000080000000000UL
#define TCTRL4_AUTO_PRECHG_ENBL_SHIFT 43
#define TCTRL4_RD_WR_PI_MORE_DLY 0x000007c000000000UL
#define TCTRL4_RD_WR_PI_MORE_DLY_SHIFT 38
#define TCTRL4_RD_WR_TI_DLY 0x0000003f00000000UL
#define TCTRL4_RD_WR_TI_DLY_SHIFT 32
#define TCTRL4_WR_WR_PI_MORE_DLY 0x00000000f8000000UL
#define TCTRL4_WR_WR_PI_MORE_DLY_SHIFT 27
#define TCTRL4_WR_WR_TI_DLY 0x0000000007e00000UL
#define TCTRL4_WR_WR_TI_DLY_SHIFT 21
#define TCTRL4_RDWR_RD_PI_MORE_DLY 0x00000000001f0000UL
#define TCTRL4_RDWR_RD_PI_MORE_DLY_SHIFT 16
#define TCTRL4_R 0x0000000000008000UL
#define TCTRL4_R_SHIFT 15
#define TCTRL4_SDRAM_MODE_REG_DATA 0x0000000000007fffUL
#define TCTRL4_SDRAM_MODE_REG_DATA_SHIFT 0
/* All 4 memory address decoding registers have the
* same layout.
*/
#define MEM_DECODE_VALID 0x8000000000000000UL /* Valid */
#define MEM_DECODE_VALID_SHIFT 63
#define MEM_DECODE_UK 0x001ffe0000000000UL /* Upper mask */
#define MEM_DECODE_UK_SHIFT 41
#define MEM_DECODE_UM 0x0000001ffff00000UL /* Upper match */
#define MEM_DECODE_UM_SHIFT 20
#define MEM_DECODE_LK 0x000000000003c000UL /* Lower mask */
#define MEM_DECODE_LK_SHIFT 14
#define MEM_DECODE_LM 0x0000000000000f00UL /* Lower match */
#define MEM_DECODE_LM_SHIFT 8
#define PA_UPPER_BITS 0x000007fffc000000UL
#define PA_UPPER_BITS_SHIFT 26
#define PA_LOWER_BITS 0x00000000000003c0UL
#define PA_LOWER_BITS_SHIFT 6
#define MACTRL_R0 0x8000000000000000UL
#define MACTRL_R0_SHIFT 63
#define MACTRL_ADDR_LE_PW 0x7000000000000000UL
#define MACTRL_ADDR_LE_PW_SHIFT 60
#define MACTRL_CMD_PW 0x0f00000000000000UL
#define MACTRL_CMD_PW_SHIFT 56
#define MACTRL_HALF_MODE_WR_MSEL_DLY 0x00fc000000000000UL
#define MACTRL_HALF_MODE_WR_MSEL_DLY_SHIFT 50
#define MACTRL_HALF_MODE_RD_MSEL_DLY 0x0003f00000000000UL
#define MACTRL_HALF_MODE_RD_MSEL_DLY_SHIFT 44
#define MACTRL_HALF_MODE_SDRAM_CTL_DLY 0x00000f0000000000UL
#define MACTRL_HALF_MODE_SDRAM_CTL_DLY_SHIFT 40
#define MACTRL_HALF_MODE_SDRAM_CLK_DLY 0x000000e000000000UL
#define MACTRL_HALF_MODE_SDRAM_CLK_DLY_SHIFT 37
#define MACTRL_R1 0x0000001000000000UL
#define MACTRL_R1_SHIFT 36
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B3 0x0000000f00000000UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B3_SHIFT 32
#define MACTRL_ENC_INTLV_B3 0x00000000f8000000UL
#define MACTRL_ENC_INTLV_B3_SHIFT 27
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B2 0x0000000007800000UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B2_SHIFT 23
#define MACTRL_ENC_INTLV_B2 0x00000000007c0000UL
#define MACTRL_ENC_INTLV_B2_SHIFT 18
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B1 0x000000000003c000UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B1_SHIFT 14
#define MACTRL_ENC_INTLV_B1 0x0000000000003e00UL
#define MACTRL_ENC_INTLV_B1_SHIFT 9
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B0 0x00000000000001e0UL
#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B0_SHIFT 5
#define MACTRL_ENC_INTLV_B0 0x000000000000001fUL
#define MACTRL_ENC_INTLV_B0_SHIFT 0
#endif /* _SPARC64_CHMCTRL_H */
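These registers are all described as mask/shift pairs; a minimal sketch (not part of the header) of extracting one field from a raw 64-bit register value, using only the definitions above:

/* Sketch: pull the refresh-interval field out of a Memory Timing Control I
 * value read from the controller (the variable name is hypothetical).
 */
static inline unsigned long tctrl1_rfr_int(unsigned long tctrl1_val)
{
	return (tctrl1_val & TCTRL1_RFR_INT) >> TCTRL1_RFR_INT_SHIFT;
}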

59
include/asm-sparc/cmt.h Normal file
View File

@ -0,0 +1,59 @@
#ifndef _SPARC64_CMT_H
#define _SPARC64_CMT_H
/* cmt.h: Chip Multi-Threading register definitions
*
* Copyright (C) 2004 David S. Miller (davem@redhat.com)
*/
/* ASI_CORE_ID - private */
#define LP_ID 0x0000000000000010UL
#define LP_ID_MAX 0x00000000003f0000UL
#define LP_ID_ID 0x000000000000003fUL
/* ASI_INTR_ID - private */
#define LP_INTR_ID 0x0000000000000000UL
#define LP_INTR_ID_ID 0x00000000000003ffUL
/* ASI_CESR_ID - private */
#define CESR_ID 0x0000000000000040UL
#define CESR_ID_ID 0x00000000000000ffUL
/* ASI_CORE_AVAILABLE - shared */
#define LP_AVAIL 0x0000000000000000UL
#define LP_AVAIL_1 0x0000000000000002UL
#define LP_AVAIL_0 0x0000000000000001UL
/* ASI_CORE_ENABLE_STATUS - shared */
#define LP_ENAB_STAT 0x0000000000000010UL
#define LP_ENAB_STAT_1 0x0000000000000002UL
#define LP_ENAB_STAT_0 0x0000000000000001UL
/* ASI_CORE_ENABLE - shared */
#define LP_ENAB 0x0000000000000020UL
#define LP_ENAB_1 0x0000000000000002UL
#define LP_ENAB_0 0x0000000000000001UL
/* ASI_CORE_RUNNING - shared */
#define LP_RUNNING_RW 0x0000000000000050UL
#define LP_RUNNING_W1S 0x0000000000000060UL
#define LP_RUNNING_W1C 0x0000000000000068UL
#define LP_RUNNING_1 0x0000000000000002UL
#define LP_RUNNING_0 0x0000000000000001UL
/* ASI_CORE_RUNNING_STAT - shared */
#define LP_RUN_STAT 0x0000000000000058UL
#define LP_RUN_STAT_1 0x0000000000000002UL
#define LP_RUN_STAT_0 0x0000000000000001UL
/* ASI_XIR_STEERING - shared */
#define LP_XIR_STEER 0x0000000000000030UL
#define LP_XIR_STEER_1 0x0000000000000002UL
#define LP_XIR_STEER_0 0x0000000000000001UL
/* ASI_CMT_ERROR_STEERING - shared */
#define CMT_ER_STEER 0x0000000000000040UL
#define CMT_ER_STEER_1 0x0000000000000002UL
#define CMT_ER_STEER_0 0x0000000000000001UL
#endif /* _SPARC64_CMT_H */

243
include/asm-sparc/compat.h Normal file
View File

@ -0,0 +1,243 @@
#ifndef _ASM_SPARC64_COMPAT_H
#define _ASM_SPARC64_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_key_t;
typedef s32 compat_timer_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
compat_off_t st_size;
compat_time_t st_atime;
compat_ulong_t st_atime_nsec;
compat_time_t st_mtime;
compat_ulong_t st_mtime_nsec;
compat_time_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_off_t st_blksize;
compat_off_t st_blocks;
u32 __unused4[2];
};
struct compat_stat64 {
unsigned long long st_dev;
unsigned long long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned char __pad3[8];
long long st_size;
unsigned int st_blksize;
unsigned char __pad4[8];
unsigned int st_blocks;
unsigned int st_atime;
unsigned int st_atime_nsec;
unsigned int st_mtime;
unsigned int st_mtime_nsec;
unsigned int st_ctime;
unsigned int st_ctime_nsec;
unsigned int __unused4;
unsigned int __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
short __unused;
};
#define F_GETLK64 12
#define F_SETLK64 13
#define F_SETLKW64 14
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
short __unused;
};
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_spare[5];
};
#define COMPAT_RLIM_INFINITY 0x7fffffff
typedef u32 compat_old_sigset_t;
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
typedef u32 compat_uptr_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
if (!(test_thread_flag(TIF_32BIT)))
usp += STACK_BIAS;
else
usp &= 0xffffffffUL;
usp -= len;
usp &= ~0x7UL;
return (void __user *) usp;
}
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short __pad1;
compat_mode_t mode;
unsigned short __pad2;
unsigned short seq;
unsigned long __unused1; /* yes they really are 64bit pads */
unsigned long __unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
unsigned int __pad1;
compat_time_t sem_otime;
unsigned int __pad2;
compat_time_t sem_ctime;
u32 sem_nsems;
u32 __unused1;
u32 __unused2;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
unsigned int __pad1;
compat_time_t msg_stime;
unsigned int __pad2;
compat_time_t msg_rtime;
unsigned int __pad3;
compat_time_t msg_ctime;
unsigned int msg_cbytes;
unsigned int msg_qnum;
unsigned int msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
unsigned int __unused1;
unsigned int __unused2;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
unsigned int __pad1;
compat_time_t shm_atime;
unsigned int __pad2;
compat_time_t shm_dtime;
unsigned int __pad3;
compat_time_t shm_ctime;
compat_size_t shm_segsz;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
unsigned int shm_nattch;
unsigned int __unused1;
unsigned int __unused2;
};
#endif /* _ASM_SPARC64_COMPAT_H */
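A minimal sketch (not from this commit) of how the compat pointer helpers above are typically used in a 32-bit-compat ioctl path; the handler name is hypothetical and the uaccess helpers (copy_from_user) are assumed to be included by the caller.

/* Sketch: a compat argument arrives as a 32-bit value and is converted back
 * into a tagged user pointer before being copied in.
 */
static int example_compat_handler(compat_uptr_t uarg)
{
	struct compat_timeval ctv;
	void __user *p = compat_ptr(uarg);

	if (copy_from_user(&ctv, p, sizeof(ctv)))
		return -EFAULT;
	/* ctv.tv_sec / ctv.tv_usec are 32-bit fields; widen them as needed. */
	return 0;
}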

View File

@ -0,0 +1,29 @@
#ifndef _COMPAT_SIGNAL_H
#define _COMPAT_SIGNAL_H
#include <linux/compat.h>
#include <asm/signal.h>
#ifdef CONFIG_COMPAT
struct __new_sigaction32 {
unsigned sa_handler;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
compat_sigset_t sa_mask;
};
struct __old_sigaction32 {
unsigned sa_handler;
compat_old_sigset_t sa_mask;
unsigned int sa_flags;
unsigned sa_restorer; /* not used by Linux/SPARC yet */
};
typedef struct sigaltstack32 {
u32 ss_sp;
int ss_flags;
compat_size_t ss_size;
} stack_t32;
#endif
#endif /* !(_COMPAT_SIGNAL_H) */

View File

@ -1,27 +1,8 @@
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
*
* Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
* both (C) David S. Miller.
*/
#ifndef _SPARC_CPUDATA_H
#define _SPARC_CPUDATA_H
#include <linux/percpu.h>
typedef struct {
unsigned long udelay_val;
unsigned long clock_tick;
unsigned int multiplier;
unsigned int counter;
int prom_node;
int mid;
int next;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#endif /* _SPARC_CPUDATA_H */
#ifndef ___ASM_SPARC_CPUDATA_H
#define ___ASM_SPARC_CPUDATA_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/cpudata_64.h>
#else
#include <asm-sparc/cpudata_32.h>
#endif
#endif

View File

@ -0,0 +1,27 @@
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
*
* Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
* both (C) David S. Miller.
*/
#ifndef _SPARC_CPUDATA_H
#define _SPARC_CPUDATA_H
#include <linux/percpu.h>
typedef struct {
unsigned long udelay_val;
unsigned long clock_tick;
unsigned int multiplier;
unsigned int counter;
int prom_node;
int mid;
int next;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#endif /* _SPARC_CPUDATA_H */

View File

@ -0,0 +1,240 @@
/* cpudata.h: Per-cpu parameters.
*
* Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
*/
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
#include <asm/hypervisor.h>
#include <asm/asi.h>
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
#include <linux/threads.h>
typedef struct {
/* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
unsigned int __pad0;
unsigned long clock_tick; /* %tick's per second */
unsigned long __pad;
unsigned int __pad1;
unsigned int __pad2;
/* Dcache line 2, rarely used */
unsigned int dcache_size;
unsigned int dcache_line_size;
unsigned int icache_size;
unsigned int icache_line_size;
unsigned int ecache_size;
unsigned int ecache_line_size;
int core_id;
int proc_id;
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
/* Trap handling code needs to get at a few critical values upon
* trap entry and to process TSB misses. These cannot be in the
* per_cpu() area as we really need to lock them into the TLB and
* thus make them part of the main kernel image. As a result we
* try to make this as small as possible.
*
* This is padded out and aligned to 64-bytes to avoid false sharing
* on SMP.
*/
/* If you modify the size of this structure, please update
* TRAP_BLOCK_SZ_SHIFT below.
*/
struct thread_info;
struct trap_per_cpu {
/* D-cache line 1: Basic thread information, cpu and device mondo queues */
struct thread_info *thread;
unsigned long pgd_paddr;
unsigned long cpu_mondo_pa;
unsigned long dev_mondo_pa;
/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
unsigned long resum_mondo_pa;
unsigned long resum_kernel_buf_pa;
unsigned long nonresum_mondo_pa;
unsigned long nonresum_kernel_buf_pa;
/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
struct hv_fault_status fault_info;
/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
unsigned long cpu_mondo_block_pa;
unsigned long cpu_list_pa;
unsigned long tsb_huge;
unsigned long tsb_huge_temp;
/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
unsigned long irq_worklist_pa;
unsigned int cpu_mondo_qmask;
unsigned int dev_mondo_qmask;
unsigned int resum_qmask;
unsigned int nonresum_qmask;
void *hdesc;
} __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS];
extern void init_cur_cpu_trap(struct thread_info *);
extern void setup_tba(void);
extern int ncpus_probed;
extern void __init cpu_probe(void);
extern const struct seq_operations cpuinfo_op;
extern unsigned long real_hard_smp_processor_id(void);
struct cpuid_patch_entry {
unsigned int addr;
unsigned int cheetah_safari[4];
unsigned int cheetah_jbus[4];
unsigned int starfire[4];
unsigned int sun4v[4];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
struct sun4v_1insn_patch_entry {
unsigned int addr;
unsigned int insn;
};
extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
__sun4v_1insn_patch_end;
struct sun4v_2insn_patch_entry {
unsigned int addr;
unsigned int insns[2];
};
extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
__sun4v_2insn_patch_end;
#endif /* !(__ASSEMBLY__) */
#define TRAP_PER_CPU_THREAD 0x00
#define TRAP_PER_CPU_PGD_PADDR 0x08
#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
#define TRAP_PER_CPU_FAULT_INFO 0x40
#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
#define TRAP_PER_CPU_TSB_HUGE 0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
#define TRAP_PER_CPU_RESUM_QMASK 0xf0
#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
#define TRAP_BLOCK_SZ_SHIFT 8
#include <asm/scratchpad.h>
#define __GET_CPUID(REG) \
/* Spitfire implementation (default). */ \
661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x1f, REG; \
nop; \
.section .cpuid_patch, "ax"; \
/* Instruction location. */ \
.word 661b; \
/* Cheetah Safari implementation. */ \
ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x3ff, REG; \
nop; \
/* Cheetah JBUS implementation. */ \
ldxa [%g0] ASI_JBUS_CONFIG, REG; \
srlx REG, 17, REG; \
and REG, 0x1f, REG; \
nop; \
/* Starfire implementation. */ \
sethi %hi(0x1fff40000d0 >> 9), REG; \
sllx REG, 9, REG; \
or REG, 0xd0, REG; \
lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
/* sun4v implementation. */ \
mov SCRATCHPAD_CPUID, REG; \
ldxa [REG] ASI_SCRATCHPAD, REG; \
nop; \
nop; \
.previous;
#ifdef CONFIG_SMP
#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
__GET_CPUID(TMP) \
sethi %hi(trap_block), DEST; \
sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
or DEST, %lo(trap_block), DEST; \
add DEST, TMP, DEST; \
/* Clobbers TMP, current address space PGD phys address into DEST. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
/* Clobbers TMP, loads DEST with current thread info pointer. */
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
/* Given the current thread info pointer in THR, load the per-cpu
* area base of the current processor into DEST. REG1, REG2, and REG3 are
* clobbered.
*
* You absolutely cannot use DEST as a temporary in this code. The
* reason is that traps can happen during execution, and return from
* trap will load the fully resolved DEST per-cpu base. This can corrupt
* the calculations done by the macro mid-stream.
*/
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
lduh [THR + TI_CPU], REG1; \
sethi %hi(__per_cpu_shift), REG3; \
sethi %hi(__per_cpu_base), REG2; \
ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
ldx [REG2 + %lo(__per_cpu_base)], REG2; \
sllx REG1, REG3, REG3; \
add REG3, REG2, DEST;
#else
#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
sethi %hi(trap_block), DEST; \
or DEST, %lo(trap_block), DEST; \
/* Uniprocessor versions, we know the cpuid is zero. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
/* No per-cpu areas on uniprocessor, so no need to load DEST. */
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
#endif /* !(CONFIG_SMP) */
#endif /* _SPARC64_CPUDATA_H */
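The cpu_data()/local_cpu_data() accessors above are the normal C-level view of this per-cpu structure; a small sketch (not part of the header), assuming <linux/cpumask.h> and printk are available:

/* Sketch: report the clock tick rate and D-cache line size of each online cpu. */
static void example_dump_cpuinfo(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		printk(KERN_INFO "cpu%d: %lu ticks/s, %u byte D-cache lines\n",
		       cpu, cpu_data(cpu).clock_tick,
		       cpu_data(cpu).dcache_line_size);
}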

14
include/asm-sparc/dcr.h Normal file
View File

@ -0,0 +1,14 @@
#ifndef _SPARC64_DCR_H
#define _SPARC64_DCR_H
/* UltraSparc-III/III+ Dispatch Control Register, ASR 0x12 */
#define DCR_DPE 0x0000000000001000 /* III+: D$ Parity Error Enable */
#define DCR_OBS 0x0000000000000fc0 /* Observability Bus Controls */
#define DCR_BPE 0x0000000000000020 /* Branch Predict Enable */
#define DCR_RPE 0x0000000000000010 /* Return Address Prediction Enable */
#define DCR_SI 0x0000000000000008 /* Single Instruction Disable */
#define DCR_IPE 0x0000000000000004 /* III+: I$ Parity Error Enable */
#define DCR_IFPOE 0x0000000000000002 /* IRQ FP Operation Enable */
#define DCR_MS 0x0000000000000001 /* Multi-Scalar dispatch */
#endif /* _SPARC64_DCR_H */
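A trivial sketch (not part of the header) of testing one of these bits in a DCR value that has already been read out by lower-level code:

/* Sketch: decide whether branch prediction is enabled from a saved DCR value. */
static inline int dcr_branch_pred_enabled(unsigned long dcr_val)
{
	return (dcr_val & DCR_BPE) != 0;
}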

27
include/asm-sparc/dcu.h Normal file
View File

@ -0,0 +1,27 @@
#ifndef _SPARC64_DCU_H
#define _SPARC64_DCU_H
#include <linux/const.h>
/* UltraSparc-III Data Cache Unit Control Register */
#define DCU_CP _AC(0x0002000000000000,UL) /* Phys Cache Enable w/o mmu */
#define DCU_CV _AC(0x0001000000000000,UL) /* Virt Cache Enable w/o mmu */
#define DCU_ME _AC(0x0000800000000000,UL) /* NC-store Merging Enable */
#define DCU_RE _AC(0x0000400000000000,UL) /* RAW bypass Enable */
#define DCU_PE _AC(0x0000200000000000,UL) /* PCache Enable */
#define DCU_HPE _AC(0x0000100000000000,UL) /* HW prefetch Enable */
#define DCU_SPE _AC(0x0000080000000000,UL) /* SW prefetch Enable */
#define DCU_SL _AC(0x0000040000000000,UL) /* Secondary ld-steering Enab*/
#define DCU_WE _AC(0x0000020000000000,UL) /* WCache enable */
#define DCU_PM _AC(0x000001fe00000000,UL) /* PA Watchpoint Byte Mask */
#define DCU_VM _AC(0x00000001fe000000,UL) /* VA Watchpoint Byte Mask */
#define DCU_PR _AC(0x0000000001000000,UL) /* PA Watchpoint Read Enable */
#define DCU_PW _AC(0x0000000000800000,UL) /* PA Watchpoint Write Enable*/
#define DCU_VR _AC(0x0000000000400000,UL) /* VA Watchpoint Read Enable */
#define DCU_VW _AC(0x0000000000200000,UL) /* VA Watchpoint Write Enable*/
#define DCU_DM _AC(0x0000000000000008,UL) /* DMMU Enable */
#define DCU_IM _AC(0x0000000000000004,UL) /* IMMU Enable */
#define DCU_DC _AC(0x0000000000000002,UL) /* Data Cache Enable */
#define DCU_IC _AC(0x0000000000000001,UL) /* Instruction Cache Enable */
#endif /* _SPARC64_DCU_H */

View File

@ -1,34 +1,8 @@
/*
* delay.h: Linux delay routines on the Sparc.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
*/
#ifndef __SPARC_DELAY_H
#define __SPARC_DELAY_H
#include <asm/cpudata.h>
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__("cmp %0, 0\n\t"
"1: bne 1b\n\t"
"subcc %0, 1, %0\n" :
"=&r" (loops) :
"0" (loops) :
"cc");
}
/* This is too messy with inline asm on the Sparc. */
extern void __udelay(unsigned long usecs, unsigned long lpj);
extern void __ndelay(unsigned long nsecs, unsigned long lpj);
#ifdef CONFIG_SMP
#define __udelay_val cpu_data(smp_processor_id()).udelay_val
#else /* SMP */
#define __udelay_val loops_per_jiffy
#endif /* SMP */
#define udelay(__usecs) __udelay(__usecs, __udelay_val)
#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
#endif /* defined(__SPARC_DELAY_H) */
#ifndef ___ASM_SPARC_DELAY_H
#define ___ASM_SPARC_DELAY_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/delay_64.h>
#else
#include <asm-sparc/delay_32.h>
#endif
#endif

View File

@ -0,0 +1,34 @@
/*
* delay.h: Linux delay routines on the Sparc.
*
* Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
*/
#ifndef __SPARC_DELAY_H
#define __SPARC_DELAY_H
#include <asm/cpudata.h>
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__("cmp %0, 0\n\t"
"1: bne 1b\n\t"
"subcc %0, 1, %0\n" :
"=&r" (loops) :
"0" (loops) :
"cc");
}
/* This is too messy with inline asm on the Sparc. */
extern void __udelay(unsigned long usecs, unsigned long lpj);
extern void __ndelay(unsigned long nsecs, unsigned long lpj);
#ifdef CONFIG_SMP
#define __udelay_val cpu_data(smp_processor_id()).udelay_val
#else /* SMP */
#define __udelay_val loops_per_jiffy
#endif /* SMP */
#define udelay(__usecs) __udelay(__usecs, __udelay_val)
#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
#endif /* defined(__SPARC_DELAY_H) */
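udelay() here is a macro that forwards the calibrated loops-per-jiffy value to __udelay(); a short sketch (not part of the header) of the usual driver-side polling idiom, with a hypothetical status bit and <linux/errno.h> plus the sbus I/O accessors assumed:

/* Sketch: poll a status register for up to ~1 ms in 10 us steps. */
static int example_wait_ready(void __iomem *status_reg)
{
	int timeout = 100;

	while (!(sbus_readb(status_reg) & 0x01) && --timeout)
		udelay(10);
	return timeout ? 0 : -ETIMEDOUT;
}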

View File

@ -0,0 +1,17 @@
/* delay.h: Linux delay routines on sparc64.
*
* Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
*/
#ifndef _SPARC64_DELAY_H
#define _SPARC64_DELAY_H
#ifndef __ASSEMBLY__
extern void __delay(unsigned long loops);
extern void udelay(unsigned long usecs);
#define mdelay(n) udelay((n) * 1000)
#endif /* !__ASSEMBLY__ */
#endif /* _SPARC64_DELAY_H */

View File

@ -0,0 +1,79 @@
/*
*
* display7seg - Driver interface for the 7-segment display
* present on Sun Microsystems CP1400 and CP1500
*
* Copyright (c) 2000 Eric Brower <ebrower@usa.net>
*
*/
#ifndef __display7seg_h__
#define __display7seg_h__
#define D7S_IOC 'p'
#define D7SIOCRD _IOR(D7S_IOC, 0x45, int) /* Read device state */
#define D7SIOCWR _IOW(D7S_IOC, 0x46, int) /* Write device state */
#define D7SIOCTM _IO (D7S_IOC, 0x47) /* Translate mode (FLIP)*/
/*
* ioctl flag definitions
*
* POINT - Toggle decimal point (0=absent 1=present)
* ALARM - Toggle alarm LED (0=green 1=red)
* FLIP - Toggle inverted mode (0=normal 1=flipped)
* bits 0-4 - Character displayed (see definitions below)
*
* Display segments are defined as follows,
* subject to D7S_FLIP register state:
*
* a
* ---
* f| |b
* -g-
* e| |c
* ---
* d
*/
#define D7S_POINT (1 << 7) /* Decimal point*/
#define D7S_ALARM (1 << 6) /* Alarm LED */
#define D7S_FLIP (1 << 5) /* Flip display */
#define D7S_0 0x00 /* Numerals 0-9 */
#define D7S_1 0x01
#define D7S_2 0x02
#define D7S_3 0x03
#define D7S_4 0x04
#define D7S_5 0x05
#define D7S_6 0x06
#define D7S_7 0x07
#define D7S_8 0x08
#define D7S_9 0x09
#define D7S_A 0x0A /* Letters A-F, H, L, P */
#define D7S_B 0x0B
#define D7S_C 0x0C
#define D7S_D 0x0D
#define D7S_E 0x0E
#define D7S_F 0x0F
#define D7S_H 0x10
#define D7S_E2 0x11
#define D7S_L 0x12
#define D7S_P 0x13
#define D7S_SEGA 0x14 /* Individual segments */
#define D7S_SEGB 0x15
#define D7S_SEGC 0x16
#define D7S_SEGD 0x17
#define D7S_SEGE 0x18
#define D7S_SEGF 0x19
#define D7S_SEGG 0x1A
#define D7S_SEGABFG 0x1B /* Segment groupings */
#define D7S_SEGCDEG 0x1C
#define D7S_SEGBCEF 0x1D
#define D7S_SEGADG 0x1E
#define D7S_BLANK 0x1F /* Clear all segments */
#define D7S_MIN_VAL 0x0
#define D7S_MAX_VAL 0x1F
#endif /* ifndef __display7seg_h__ */
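A user-space sketch (not part of the header) of driving the display through the ioctl interface defined above; it assumes the d7s misc device is exposed as /dev/d7s, that these definitions are visible to user space, and that D7SIOCWR/D7SIOCRD take a pointer to int per their _IOW()/_IOR() encoding:

/* Sketch: show the digit '5' with the decimal point lit, then read the state back. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* D7S_* and D7SIOC* definitions as above, assumed available to user space. */

int main(void)
{
	int regs = D7S_5 | D7S_POINT;
	int fd = open("/dev/d7s", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, D7SIOCWR, &regs) < 0)
		perror("D7SIOCWR");
	if (ioctl(fd, D7SIOCRD, &regs) == 0)
		printf("display state: 0x%02x\n", regs);
	close(fd);
	return 0;
}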

View File

@ -1,11 +1,8 @@
#ifndef _ASM_SPARC_DMA_MAPPING_H
#define _ASM_SPARC_DMA_MAPPING_H
#ifdef CONFIG_PCI
#include <asm-generic/dma-mapping.h>
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/dma-mapping_64.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif /* PCI */
#endif /* _ASM_SPARC_DMA_MAPPING_H */
#include <asm-sparc/dma-mapping_32.h>
#endif
#endif

View File

@ -0,0 +1,11 @@
#ifndef _ASM_SPARC_DMA_MAPPING_H
#define _ASM_SPARC_DMA_MAPPING_H
#ifdef CONFIG_PCI
#include <asm-generic/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif /* PCI */
#endif /* _ASM_SPARC_DMA_MAPPING_H */

View File

@ -0,0 +1,154 @@
#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H
#include <linux/scatterlist.h>
#include <linux/mm.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
struct dma_ops {
void *(*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
size_t size,
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction);
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
int nhwentries,
enum dma_data_direction direction);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction);
void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
int nelems,
enum dma_data_direction direction);
};
extern const struct dma_ops *dma_ops;
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size,
enum dma_data_direction direction)
{
return dma_ops->map_single(dev, cpu_addr, size, direction);
}
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
dma_ops->unmap_single(dev, dma_addr, size, direction);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
return dma_ops->map_single(dev, page_address(page) + offset,
size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction)
{
dma_ops->unmap_single(dev, dma_address, size, direction);
}
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
return dma_ops->map_sg(dev, sg, nents, direction);
}
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction)
{
dma_ops->unmap_sg(dev, sg, nents, direction);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
/* No flushing needed to sync cpu writes to the device. */
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return (dma_addr == DMA_ERROR_CODE);
}
static inline int dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
#endif /* _ASM_SPARC64_DMA_MAPPING_H */
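A minimal sketch (not part of the header) of the streaming-DMA calling pattern these wrappers implement; the device, buffer and length are hypothetical, DMA_TO_DEVICE comes from <linux/dma-mapping.h>, and note that in this tree dma_mapping_error() takes only the dma_addr_t:

/* Sketch: map a buffer for a device read of memory, hand busaddr to the
 * hardware, then tear the mapping down once the transfer completes.
 */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t busaddr;

	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(busaddr))
		return -ENOMEM;
	/* ... program busaddr/len into the device and wait for completion ... */
	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
	return 0;
}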

View File

@ -1,288 +1,8 @@
/* include/asm-sparc/dma.h
*
* Copyright 1995 (C) David S. Miller (davem@davemloft.net)
*/
#ifndef _ASM_SPARC_DMA_H
#define _ASM_SPARC_DMA_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/vac-ops.h> /* for invalidate's, etc. */
#include <asm/sbus.h>
#include <asm/delay.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/spinlock.h>
struct page;
extern spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* These are irrelevant for Sparc DMA, but we leave it in so that
* things can compile.
*/
#define MAX_DMA_CHANNELS 8
#define MAX_DMA_ADDRESS (~0UL)
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
/* Useful constants */
#define SIZE_16MB (16*1024*1024)
#define SIZE_64K (64*1024)
/* SBUS DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
/* DVMA chip revisions */
enum dvma_rev {
dvmarev0,
dvmaesc1,
dvmarev1,
dvmarev2,
dvmarev3,
dvmarevplus,
dvmahme
};
#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
/* Linux DMA information structure, filled during probe. */
struct sbus_dma {
struct sbus_dma *next;
struct sbus_dev *sdev;
void __iomem *regs;
/* Status, misc info */
int node; /* Prom node for this DMA device */
int running; /* Are we doing DMA now? */
int allocated; /* Are we "owned" by anyone yet? */
/* Transfer information. */
unsigned long addr; /* Start address of current transfer */
int nbytes; /* Size of current transfer */
int realbytes; /* For splitting up large transfers, etc. */
/* DMA revision */
enum dvma_rev revision;
};
extern struct sbus_dma *dma_chain;
/* Broken hardware... */
#ifdef CONFIG_SUN4
/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
* Or is rev0 present only on sun4 boxes? -jj */
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
#ifndef ___ASM_SPARC_DMA_H
#define ___ASM_SPARC_DMA_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/dma_64.h>
#else
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
#include <asm-sparc/dma_32.h>
#endif
#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
/* Main routines in dma.c */
extern void dvma_init(struct sbus_bus *);
/* Fields in the cond_reg register */
/* First, the version identification bits */
#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
#define DMA_VERS0 0x00000000 /* Sunray DMA version */
#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
#define DMA_VERS1 0x80000000 /* DMA rev 1 */
#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
/* Values describing the burst-size property from the PROM */
#define DMA_BURST1 0x01
#define DMA_BURST2 0x02
#define DMA_BURST4 0x04
#define DMA_BURST8 0x08
#define DMA_BURST16 0x10
#define DMA_BURST32 0x20
#define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f
/* Determine highest possible final transfer address given a base */
#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
/* Yes, I hack a lot of elisp in my spare time... */
#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
#define DMA_BEGINDMA_W(regs) \
((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
#define DMA_BEGINDMA_R(regs) \
((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
/* For certain DMA chips, we need to disable ints upon irq entry
* and turn them back on when we are done. So in any ESP interrupt
* handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
* when leaving the handler. You have been warned...
*/
#define DMA_IRQ_ENTRY(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
} while (0)
#define DMA_IRQ_EXIT(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
} while(0)
#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
/* Pause until counter runs out or BIT isn't set in the DMA condition
* register.
*/
static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
unsigned long bit)
{
int ctr = 50000; /* Let's find some bugs ;) */
/* Busy wait until the bit is not set any more */
while((regs->cond_reg&bit) && (ctr>0)) {
ctr--;
__delay(5);
}
/* Check for bogus outcome. */
if(!ctr)
panic("DMA timeout");
}
/* Reset the friggin' thing... */
#define DMA_RESET(dma) do { \
struct sparc_dma_registers *regs = dma->regs; \
/* Let the current FIFO drain itself */ \
sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
/* Reset the logic */ \
regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
__delay(400); /* let the bits set ;) */ \
regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
/* Enable FAST transfers if available */ \
if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
dma->running = 0; \
} while(0)
#endif
#define for_each_dvma(dma) \
for((dma) = dma_chain; (dma); (dma) = (dma)->next)
extern int get_dma_list(char *);
extern int request_dma(unsigned int, __const__ char *);
extern void free_dma(unsigned int);
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
/* Routines for data transfer buffers. */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
/*
* mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
*
* The mmu_map_dma_area establishes two mappings in one go.
* These mappings point to pages normally mapped at 'va' (linear address).
* First mapping is for CPU visible address at 'a', uncached.
* This is an alias, but it works because it is an uncached mapping.
* Second mapping is for device visible address, or "bus" address.
* The bus address is returned at '*pba'.
*
* These functions seem distinct, but are hard to split. On sun4c,
* at least for now, 'a' is equal to bus address, and returned in *pba.
* On sun4m, page attributes depend on the CPU type, so we have to
* know if we are mapping RAM or I/O, so it has to be an additional argument
* to a separate mapping function for CPU visible mappings.
*/
BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
#endif /* !(_ASM_SPARC_DMA_H) */

288
include/asm-sparc/dma_32.h Normal file
View File

@ -0,0 +1,288 @@
/* include/asm-sparc/dma.h
*
* Copyright 1995 (C) David S. Miller (davem@davemloft.net)
*/
#ifndef _ASM_SPARC_DMA_H
#define _ASM_SPARC_DMA_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/vac-ops.h> /* for invalidate's, etc. */
#include <asm/sbus.h>
#include <asm/delay.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/spinlock.h>
struct page;
extern spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* These are irrelevant for Sparc DMA, but we leave it in so that
* things can compile.
*/
#define MAX_DMA_CHANNELS 8
#define MAX_DMA_ADDRESS (~0UL)
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
/* Useful constants */
#define SIZE_16MB (16*1024*1024)
#define SIZE_64K (64*1024)
/* SBUS DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
/* DVMA chip revisions */
enum dvma_rev {
dvmarev0,
dvmaesc1,
dvmarev1,
dvmarev2,
dvmarev3,
dvmarevplus,
dvmahme
};
#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
/* Linux DMA information structure, filled during probe. */
struct sbus_dma {
struct sbus_dma *next;
struct sbus_dev *sdev;
void __iomem *regs;
/* Status, misc info */
int node; /* Prom node for this DMA device */
int running; /* Are we doing DMA now? */
int allocated; /* Are we "owned" by anyone yet? */
/* Transfer information. */
unsigned long addr; /* Start address of current transfer */
int nbytes; /* Size of current transfer */
int realbytes; /* For splitting up large transfers, etc. */
/* DMA revision */
enum dvma_rev revision;
};
extern struct sbus_dma *dma_chain;
/* Broken hardware... */
#ifdef CONFIG_SUN4
/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
* Or is rev0 present only on sun4 boxes? -jj */
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
#else
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
#endif
#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
/* Main routines in dma.c */
extern void dvma_init(struct sbus_bus *);
/* Fields in the cond_reg register */
/* First, the version identification bits */
#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
#define DMA_VERS0 0x00000000 /* Sunray DMA version */
#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
#define DMA_VERS1 0x80000000 /* DMA rev 1 */
#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
/* Values describing the burst-size property from the PROM */
#define DMA_BURST1 0x01
#define DMA_BURST2 0x02
#define DMA_BURST4 0x04
#define DMA_BURST8 0x08
#define DMA_BURST16 0x10
#define DMA_BURST32 0x20
#define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f
/* Determine highest possible final transfer address given a base */
#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
/* Yes, I hack a lot of elisp in my spare time... */
#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
#define DMA_BEGINDMA_W(regs) \
((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
#define DMA_BEGINDMA_R(regs) \
((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
/* For certain DMA chips, we need to disable ints upon irq entry
* and turn them back on when we are done. So in any ESP interrupt
* handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
* when leaving the handler. You have been warned...
*/
#define DMA_IRQ_ENTRY(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
} while (0)
#define DMA_IRQ_EXIT(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
} while(0)
#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
/* Pause until counter runs out or BIT isn't set in the DMA condition
* register.
*/
static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
unsigned long bit)
{
int ctr = 50000; /* Let's find some bugs ;) */
/* Busy wait until the bit is not set any more */
while((regs->cond_reg&bit) && (ctr>0)) {
ctr--;
__delay(5);
}
/* Check for bogus outcome. */
if(!ctr)
panic("DMA timeout");
}
/* Reset the friggin' thing... */
#define DMA_RESET(dma) do { \
struct sparc_dma_registers *regs = dma->regs; \
/* Let the current FIFO drain itself */ \
sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
/* Reset the logic */ \
regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
__delay(400); /* let the bits set ;) */ \
regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
/* Enable FAST transfers if available */ \
if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
dma->running = 0; \
} while(0)
#endif
#define for_each_dvma(dma) \
for((dma) = dma_chain; (dma); (dma) = (dma)->next)
extern int get_dma_list(char *);
extern int request_dma(unsigned int, __const__ char *);
extern void free_dma(unsigned int);
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
/* Routines for data transfer buffers. */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
/*
* mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
*
* The mmu_map_dma_area establishes two mappings in one go.
* These mappings point to pages normally mapped at 'va' (linear address).
* First mapping is for CPU visible address at 'a', uncached.
* This is an alias, but it works because it is an uncached mapping.
* Second mapping is for device visible address, or "bus" address.
* The bus address is returned at '*pba'.
*
* These functions seem distinct, but are hard to split. On sun4c,
* at least for now, 'a' is equal to bus address, and returned in *pba.
* On sun4m, page attributes depend on the CPU type, so we have to
* know if we are mapping RAM or I/O, so it has to be an additional argument
* to a separate mapping function for CPU visible mappings.
*/
BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
#endif /* !(_ASM_SPARC_DMA_H) */
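The DMA_IRQ_ENTRY/DMA_IRQ_EXIT rule spelled out above translates into an interrupt handler skeleton along these lines (a sketch, not part of the commit; struct sparc_dma_registers with a cond_reg field is assumed to be supplied by the ESP/LANCE driver side, and <linux/interrupt.h> is assumed for irqreturn_t):

/* Sketch: bracket the body of an ESP interrupt handler with the required
 * DMA interrupt masking on broken DVMA revisions.
 */
static irqreturn_t example_esp_intr(int irq, void *dev_id)
{
	struct sbus_dma *dma = dev_id;
	struct sparc_dma_registers *dregs = (struct sparc_dma_registers *)dma->regs;

	DMA_IRQ_ENTRY(dma, dregs);
	if (DMA_ERROR_P(dregs))
		printk(KERN_ERR "esp: DVMA error condition\n");
	/* ... normal ESP interrupt processing ... */
	DMA_IRQ_EXIT(dma, dregs);
	return IRQ_HANDLED;
}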

205
include/asm-sparc/dma_64.h Normal file
View File

@ -0,0 +1,205 @@
/*
* include/asm-sparc64/dma.h
*
* Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef _ASM_SPARC64_DMA_H
#define _ASM_SPARC64_DMA_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/sbus.h>
#include <asm/delay.h>
#include <asm/oplib.h>
/* These are irrelevant for Sparc DMA, but we leave it in so that
* things can compile.
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
#define MAX_DMA_ADDRESS (~0UL)
/* Useful constants */
#define SIZE_16MB (16*1024*1024)
#define SIZE_64K (64*1024)
/* SBUS DMA controller reg offsets */
#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
/* DVMA chip revisions */
enum dvma_rev {
dvmarev0,
dvmaesc1,
dvmarev1,
dvmarev2,
dvmarev3,
dvmarevplus,
dvmahme
};
#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
/* Linux DMA information structure, filled during probe. */
struct sbus_dma {
struct sbus_dma *next;
struct sbus_dev *sdev;
void __iomem *regs;
/* Status, misc info */
int node; /* Prom node for this DMA device */
int running; /* Are we doing DMA now? */
int allocated; /* Are we "owned" by anyone yet? */
/* Transfer information. */
u32 addr; /* Start address of current transfer */
int nbytes; /* Size of current transfer */
int realbytes; /* For splitting up large transfers, etc. */
/* DMA revision */
enum dvma_rev revision;
};
extern struct sbus_dma *dma_chain;
/* Broken hardware... */
#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
/* Main routines in dma.c */
extern void dvma_init(struct sbus_bus *);
/* Fields in the cond_reg register */
/* First, the version identification bits */
#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
#define DMA_VERS0 0x00000000 /* Sunray DMA version */
#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
#define DMA_VERS1 0x80000000 /* DMA rev 1 */
#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
/* Values describing the burst-size property from the PROM */
#define DMA_BURST1 0x01
#define DMA_BURST2 0x02
#define DMA_BURST4 0x04
#define DMA_BURST8 0x08
#define DMA_BURST16 0x10
#define DMA_BURST32 0x20
#define DMA_BURST64 0x40
#define DMA_BURSTBITS 0x7f
/* Determine highest possible final transfer address given a base */
#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
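/* Worked example (editor's note): DMA_MAXEND(0x00ff0000) evaluates to
 * 0x01000000 - 0x00ff0000 = 0x00010000, i.e. only 64KB can be transferred
 * before the 16MB DVMA boundary is hit.
 */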
/* Yes, I hack a lot of elisp in my spare time... */
#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
#define DMA_OFF(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp &= ~DMA_ENABLE; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_INTSOFF(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp &= ~DMA_INT_ENAB; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_INTSON(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= DMA_INT_ENAB; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_PUNTFIFO(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= DMA_FIFO_INV; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_SETSTART(__regs, __addr) \
sbus_writel((u32)(__addr), (__regs) + DMA_ADDR);
#define DMA_BEGINDMA_W(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB); \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
#define DMA_BEGINDMA_R(__regs) \
do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
tmp |= (DMA_ENABLE|DMA_INT_ENAB); \
tmp &= ~DMA_ST_WRITE; \
sbus_writel(tmp, (__regs) + DMA_CSR); \
} while(0)
/* For certain DMA chips, we need to disable ints upon irq entry
* and turn them back on when we are done. So in any ESP interrupt
* handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
* when leaving the handler. You have been warned...
*/
#define DMA_IRQ_ENTRY(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
} while (0)
#define DMA_IRQ_EXIT(dma, dregs) do { \
if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
} while(0)
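
/* Editor's illustration (not part of this header): the protocol described
 * above, sketched for a hypothetical ESP-style handler. The example_esp
 * structure and handler name are assumptions; irqreturn_t/IRQ_HANDLED come
 * from <linux/interrupt.h>.
 */
#if 0	/* illustration only */
struct example_esp {
	struct sbus_dma	*dma;
	void __iomem	*dregs;
};

static irqreturn_t example_esp_intr(int irq, void *dev_id)
{
	struct example_esp *esp = dev_id;

	DMA_IRQ_ENTRY(esp->dma, esp->dregs);	/* mask DMA irqs on broken revs */

	/* ... service the SCSI/DMA interrupt here ... */

	DMA_IRQ_EXIT(esp->dma, esp->dregs);	/* and unmask them on the way out */
	return IRQ_HANDLED;
}
#endif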
#define for_each_dvma(dma) \
for((dma) = dma_chain; (dma); (dma) = (dma)->next)
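
/* Editor's illustration: claiming the first idle controller by walking the
 * probe-time dma_chain with for_each_dvma(). Locking is deliberately elided.
 */
#if 0	/* illustration only */
static struct sbus_dma *example_claim_dvma(void)
{
	struct sbus_dma *dma;

	for_each_dvma(dma) {
		if (!dma->allocated) {
			dma->allocated = 1;
			return dma;
		}
	}
	return NULL;
}
#endif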
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* !(_ASM_SPARC64_DMA_H) */

View File

@ -1,99 +1,8 @@
/*
* ebus.h: PCI to Ebus pseudo driver software state.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*
* Adopted for sparc by V. Roganov and G. Raiko.
*/
#ifndef __SPARC_EBUS_H
#define __SPARC_EBUS_H
#ifndef _LINUX_IOPORT_H
#include <linux/ioport.h>
#ifndef ___ASM_SPARC_EBUS_H
#define ___ASM_SPARC_EBUS_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/ebus_64.h>
#else
#include <asm-sparc/ebus_32.h>
#endif
#endif
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/of_device.h>
struct linux_ebus_child {
struct linux_ebus_child *next;
struct linux_ebus_device *parent;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
struct linux_ebus_device {
struct of_device ofdev;
struct linux_ebus_device *next;
struct linux_ebus_child *children;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
struct linux_ebus {
struct of_device ofdev;
struct linux_ebus *next;
struct linux_ebus_device *devices;
struct linux_pbm_info *parent;
struct pci_dev *self;
struct device_node *prom_node;
};
#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
struct linux_ebus_dma {
unsigned int dcsr;
unsigned int dacr;
unsigned int dbcr;
};
#define EBUS_DCSR_INT_PEND 0x00000001
#define EBUS_DCSR_ERR_PEND 0x00000002
#define EBUS_DCSR_DRAIN 0x00000004
#define EBUS_DCSR_INT_EN 0x00000010
#define EBUS_DCSR_RESET 0x00000080
#define EBUS_DCSR_WRITE 0x00000100
#define EBUS_DCSR_EN_DMA 0x00000200
#define EBUS_DCSR_CYC_PEND 0x00000400
#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
#define EBUS_DCSR_EN_CNT 0x00002000
#define EBUS_DCSR_TC 0x00004000
#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
#define EBUS_DCSR_BURST_SZ_1 0x00080000
#define EBUS_DCSR_BURST_SZ_4 0x00000000
#define EBUS_DCSR_BURST_SZ_8 0x00040000
#define EBUS_DCSR_BURST_SZ_16 0x000c0000
#define EBUS_DCSR_DIAG_EN 0x00100000
#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
#define EBUS_DCSR_TCI_DIS 0x00800000
#define EBUS_DCSR_EN_NEXT 0x01000000
#define EBUS_DCSR_DMA_ON 0x02000000
#define EBUS_DCSR_A_LOADED 0x04000000
#define EBUS_DCSR_NA_LOADED 0x08000000
#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
extern struct linux_ebus *ebus_chain;
extern void ebus_init(void);
#define for_each_ebus(bus) \
for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
#define for_each_ebusdev(dev, bus) \
for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
#define for_each_edevchild(dev, child) \
for((child) = (dev)->children; (child); (child) = (child)->next)
#endif /* !(__SPARC_EBUS_H) */

View File

@ -0,0 +1,99 @@
/*
* ebus.h: PCI to Ebus pseudo driver software state.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*
* Adopted for sparc by V. Roganov and G. Raiko.
*/
#ifndef __SPARC_EBUS_H
#define __SPARC_EBUS_H
#ifndef _LINUX_IOPORT_H
#include <linux/ioport.h>
#endif
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/of_device.h>
struct linux_ebus_child {
struct linux_ebus_child *next;
struct linux_ebus_device *parent;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
struct linux_ebus_device {
struct of_device ofdev;
struct linux_ebus_device *next;
struct linux_ebus_child *children;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
struct linux_ebus {
struct of_device ofdev;
struct linux_ebus *next;
struct linux_ebus_device *devices;
struct linux_pbm_info *parent;
struct pci_dev *self;
struct device_node *prom_node;
};
#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
struct linux_ebus_dma {
unsigned int dcsr;
unsigned int dacr;
unsigned int dbcr;
};
#define EBUS_DCSR_INT_PEND 0x00000001
#define EBUS_DCSR_ERR_PEND 0x00000002
#define EBUS_DCSR_DRAIN 0x00000004
#define EBUS_DCSR_INT_EN 0x00000010
#define EBUS_DCSR_RESET 0x00000080
#define EBUS_DCSR_WRITE 0x00000100
#define EBUS_DCSR_EN_DMA 0x00000200
#define EBUS_DCSR_CYC_PEND 0x00000400
#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
#define EBUS_DCSR_EN_CNT 0x00002000
#define EBUS_DCSR_TC 0x00004000
#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
#define EBUS_DCSR_BURST_SZ_1 0x00080000
#define EBUS_DCSR_BURST_SZ_4 0x00000000
#define EBUS_DCSR_BURST_SZ_8 0x00040000
#define EBUS_DCSR_BURST_SZ_16 0x000c0000
#define EBUS_DCSR_DIAG_EN 0x00100000
#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
#define EBUS_DCSR_TCI_DIS 0x00800000
#define EBUS_DCSR_EN_NEXT 0x01000000
#define EBUS_DCSR_DMA_ON 0x02000000
#define EBUS_DCSR_A_LOADED 0x04000000
#define EBUS_DCSR_NA_LOADED 0x08000000
#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
extern struct linux_ebus *ebus_chain;
extern void ebus_init(void);
#define for_each_ebus(bus) \
for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
#define for_each_ebusdev(dev, bus) \
for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
#define for_each_edevchild(dev, child) \
for((child) = (dev)->children; (child); (child) = (child)->next)
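
/* Editor's illustration (not part of this header): the usual probe-time walk
 * over every EBus, its devices and their children using the iterators above.
 * The function name is hypothetical; printk comes from <linux/kernel.h>.
 */
#if 0	/* illustration only */
static void example_scan_ebus(void)
{
	struct linux_ebus *bus;
	struct linux_ebus_device *dev;
	struct linux_ebus_child *child;

	for_each_ebus(bus) {
		for_each_ebusdev(dev, bus) {
			printk(KERN_INFO "ebus device %s\n", dev->prom_node->name);
			for_each_edevchild(dev, child)
				printk(KERN_INFO "  child %s\n", child->prom_node->name);
		}
	}
}
#endif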
#endif /* !(__SPARC_EBUS_H) */

View File

@ -0,0 +1,94 @@
/*
* ebus.h: PCI to Ebus pseudo driver software state.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#ifndef __SPARC64_EBUS_H
#define __SPARC64_EBUS_H
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/of_device.h>
struct linux_ebus_child {
struct linux_ebus_child *next;
struct linux_ebus_device *parent;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
struct linux_ebus_device {
struct of_device ofdev;
struct linux_ebus_device *next;
struct linux_ebus_child *children;
struct linux_ebus *bus;
struct device_node *prom_node;
struct resource resource[PROMREG_MAX];
int num_addrs;
unsigned int irqs[PROMINTR_MAX];
int num_irqs;
};
#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
struct linux_ebus {
struct of_device ofdev;
struct linux_ebus *next;
struct linux_ebus_device *devices;
struct pci_dev *self;
int index;
int is_rio;
struct device_node *prom_node;
};
#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
struct ebus_dma_info {
	spinlock_t	lock;
	void __iomem	*regs;

	unsigned int	flags;
#define EBUS_DMA_FLAG_USE_EBDMA_HANDLER		0x00000001
#define EBUS_DMA_FLAG_TCI_DISABLE		0x00000002

	/* These are only valid if EBUS_DMA_FLAG_USE_EBDMA_HANDLER is
	 * set.
	 */
	void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
	void *client_cookie;
	unsigned int	irq;
#define EBUS_DMA_EVENT_ERROR	1
#define EBUS_DMA_EVENT_DMA	2
#define EBUS_DMA_EVENT_DEVICE	4

	unsigned char	name[64];
};
extern int ebus_dma_register(struct ebus_dma_info *p);
extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
extern void ebus_dma_unregister(struct ebus_dma_info *p);
extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
size_t len);
extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
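
/* Editor's illustration (not part of this header): a plausible setup/start
 * sequence for the EBus DMA helpers declared above, roughly as a sound or
 * parallel-port driver would use them. The callback, names and the direction
 * argument to ebus_dma_prepare() are assumptions; error handling is elided.
 */
#if 0	/* illustration only */
static void example_dma_callback(struct ebus_dma_info *p, int event, void *cookie)
{
	/* react to EBUS_DMA_EVENT_DMA / _ERROR / _DEVICE here */
}

static int example_ebus_dma_start(struct ebus_dma_info *p, void __iomem *regs,
				  unsigned int irq, dma_addr_t buf, size_t len)
{
	memset(p, 0, sizeof(*p));
	spin_lock_init(&p->lock);
	p->regs     = regs;
	p->irq      = irq;
	p->flags    = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;	/* let the helper own the IRQ */
	p->callback = example_dma_callback;
	strcpy((char *)p->name, "example-ebdma");

	if (ebus_dma_register(p))
		return -ENODEV;
	ebus_dma_irq_enable(p, 1);

	ebus_dma_prepare(p, 1);			/* pick a transfer direction */
	ebus_dma_request(p, buf, len);		/* queue one buffer */
	ebus_dma_enable(p, 1);			/* and let it run */
	return 0;
}
#endif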
extern struct linux_ebus *ebus_chain;
extern void ebus_init(void);
#define for_each_ebus(bus) \
for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
#define for_each_ebusdev(dev, bus) \
for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
#define for_each_edevchild(dev, child) \
for((child) = (dev)->children; (child); (child) = (child)->next)
#endif /* !(__SPARC64_EBUS_H) */

View File

@ -1,145 +1,8 @@
#ifndef __ASMSPARC_ELF_H
#define __ASMSPARC_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
/*
* Sparc section types
*/
#define STT_REGISTER 13
/*
* Sparc ELF relocation types
*/
#define R_SPARC_NONE 0
#define R_SPARC_8 1
#define R_SPARC_16 2
#define R_SPARC_32 3
#define R_SPARC_DISP8 4
#define R_SPARC_DISP16 5
#define R_SPARC_DISP32 6
#define R_SPARC_WDISP30 7
#define R_SPARC_WDISP22 8
#define R_SPARC_HI22 9
#define R_SPARC_22 10
#define R_SPARC_13 11
#define R_SPARC_LO10 12
#define R_SPARC_GOT10 13
#define R_SPARC_GOT13 14
#define R_SPARC_GOT22 15
#define R_SPARC_PC10 16
#define R_SPARC_PC22 17
#define R_SPARC_WPLT30 18
#define R_SPARC_COPY 19
#define R_SPARC_GLOB_DAT 20
#define R_SPARC_JMP_SLOT 21
#define R_SPARC_RELATIVE 22
#define R_SPARC_UA32 23
#define R_SPARC_PLT32 24
#define R_SPARC_HIPLT22 25
#define R_SPARC_LOPLT10 26
#define R_SPARC_PCPLT32 27
#define R_SPARC_PCPLT22 28
#define R_SPARC_PCPLT10 29
#define R_SPARC_10 30
#define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_OLO10 33
#define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41
#define R_SPARC_7 43
#define R_SPARC_5 44
#define R_SPARC_6 45
/* Bits present in AT_HWCAP, primarily for Sparc32. */
#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
#define HWCAP_SPARC_STBAR 2
#define HWCAP_SPARC_SWAP 4
#define HWCAP_SPARC_MULDIV 8
#define HWCAP_SPARC_V9 16
#define HWCAP_SPARC_ULTRA3 32
#define CORE_DUMP_USE_REGSET
/* Format is:
* G0 --> G7
* O0 --> O7
* L0 --> L7
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG 38
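/* 8 globals + 8 outs + 8 locals + 8 ins + the 6 state registers above = 38 */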
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
	union {
		unsigned long	pr_regs[32];
		double		pr_dregs[16];
	} pr_fr;
	unsigned long	__unused;
	unsigned long	pr_fsr;
	unsigned char	pr_qcnt;
	unsigned char	pr_q_entrysize;
	unsigned char	pr_en;
	unsigned int	pr_q[64];
} elf_fpregset_t;
#include <asm/mbus.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_ARCH EM_SPARC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define USE_ELF_CORE_DUMP
#ifndef CONFIG_SUN4
#define ELF_EXEC_PAGESIZE 4096
#ifndef ___ASM_SPARC_ELF_H
#define ___ASM_SPARC_ELF_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/elf_64.h>
#else
#define ELF_EXEC_PAGESIZE 8192
#include <asm-sparc/elf_32.h>
#endif
#endif
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
/* This yields a mask that user programs can use to figure out what
instruction set this cpu supports. This can NOT be done in userspace
on Sparc. */
/* Sun4c has none of the capabilities, most sun4m's have them all.
* XXX This is gross, set some global variable at boot time. -DaveM
*/
#define ELF_HWCAP ((ARCH_SUN4C_SUN4) ? 0 : \
(HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
HWCAP_SPARC_SWAP | \
((srmmu_modtype != Cypress && \
srmmu_modtype != Cypress_vE && \
srmmu_modtype != Cypress_vD) ? \
HWCAP_SPARC_MULDIV : 0)))
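/* Editor's note: a typical non-Cypress sun4m therefore reports
 * FLUSH | STBAR | SWAP | MULDIV == 0x0f here, while sun4c reports 0.
 */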
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
#endif /* !(__ASMSPARC_ELF_H) */

Some files were not shown because too many files have changed in this diff.