linux-next/arch/sparc/include/asm/smpprim.h
Sam Ravnborg a439fe51a1 sparc, sparc64: use arch/sparc/include
The majority of this patch was created by the following script:

***
ASM=arch/sparc/include/asm
mkdir -p $ASM
git mv include/asm-sparc64/ftrace.h $ASM
git rm include/asm-sparc64/*
git mv include/asm-sparc/* $ASM
sed -ie 's/asm-sparc64/asm/g' $ASM/*
sed -ie 's/asm-sparc/asm/g' $ASM/*
***

The rest was an update of the top-level Makefile to use sparc
for header files when sparc64 is being built.
And a small fixlet to pick up the correct unistd.h from
sparc64 code.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
2008-07-27 23:00:59 +02:00

55 lines | 1.2 KiB | C

/*
 * smpprim.h: SMP locking primitives on the Sparc
 *
 * God knows we won't be actually using this code for some time
 * but I thought I'd write it since I knew how.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SMPPRIM_H
#define __SPARC_SMPPRIM_H

/* Test and set the unsigned byte at ADDR to 1. Returns the previous
 * value. On the Sparc we use the ldstub instruction since it is
 * atomic.
 */

static inline __volatile__ char test_and_set(void *addr)
{
	char state = 0;

	__asm__ __volatile__("ldstub [%0], %1 ! test_and_set\n\t" :
			     "=r" (addr), "=r" (state) :
			     "0" (addr), "1" (state) : "memory");

	return state;
}

/* Initialize a spin-lock. */
static inline __volatile__ smp_initlock(void *spinlock)
{
	/* Unset the lock. */
	*((unsigned char *) spinlock) = 0;
	return;
}

/* This routine spins until it acquires the lock at ADDR. */
static inline __volatile__ smp_lock(void *addr)
{
	while(test_and_set(addr) == 0xff)
		;

	/* We now have the lock */
	return;
}

/* This routine releases the lock at ADDR. */
static inline __volatile__ smp_unlock(void *addr)
{
	*((unsigned char *) addr) = 0;
}

#endif /* !(__SPARC_SMPPRIM_H) */
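
Despite the comment's wording ("set ... to 1"), the SPARC ldstub instruction stores 0xff (all ones) into the addressed byte and returns the value that was there before, which is why smp_lock() tests against 0xff. The fragment below is a rough, NON-atomic C illustration of that semantics only; the helper name ldstub_emulated is invented for the sketch and does not exist in the kernel.

/* NON-atomic illustration: the real ldstub performs the load and the
 * store of 0xff as one atomic memory transaction.
 */
unsigned char ldstub_emulated(unsigned char *addr)
{
	unsigned char old = *addr;	/* load the byte currently at addr */

	*addr = 0xff;			/* unconditionally store all ones */
	return old;			/* 0x00 means the caller set it first */
}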
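Taken together, the three helpers form a simple test-and-set spinlock: smp_initlock() clears the byte, smp_lock() spins in test_and_set() until it sees a value other than 0xff (meaning this CPU's ldstub was the one that set the byte), and smp_unlock() stores 0 again. A minimal usage sketch follows, assuming a sparc32 kernel build where this header is reachable as <asm/smpprim.h>; the lock byte, counter, and function names are invented for the example.

#include <asm/smpprim.h>

static unsigned char counter_lock;	/* 0 = free, 0xff = held */
static int shared_counter;		/* data the lock protects */

void counter_setup(void)
{
	smp_initlock(&counter_lock);	/* release the lock before first use */
}

void counter_bump(void)
{
	smp_lock(&counter_lock);	/* spin until our ldstub sees a byte != 0xff */
	shared_counter++;		/* critical section */
	smp_unlock(&counter_lock);	/* store 0 so another CPU's ldstub can win */
}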