39c29657fc
EDAC has a foundation to perform software memory scrubbing, but it requires a per architecture (atomic_scrub) function for performing an atomic update operation. Under X86, this is done with a lock: add [addr],0 in the file asm-x86/edac.h

This patch provides the MIPS arch with that atomic function, atomic_scrub(), in asm-mips/edac.h

Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Doug Thompson <dougthompson@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
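For comparison, here is a minimal sketch of the x86-style scrub the commit message refers to: a locked add of zero is an atomic read-modify-write, so each word is rewritten in place (letting the memory controller regenerate its ECC check bits) without changing its value. The asm statement is the one quoted in the comment inside the MIPS file below; the surrounding loop is illustrative and not a verbatim copy of asm-x86/edac.h.

/* Sketch of the x86 atomic_scrub(); the loop structure is assumed,
 * only the "lock; addl" form is taken from the comment in this file. */
#include <linux/types.h>

static inline void atomic_scrub(void *va, u32 size)
{
	u32 i, *virt_addr = va;

	/* Locked add of 0: atomic read-modify-write of each 32-bit word */
	for (i = 0; i < size / 4; i++, virt_addr++)
		asm("lock; addl $0, %0"::"m" (*virt_addr));
}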
36 lines
989 B
C
#ifndef ASM_EDAC_H
#define ASM_EDAC_H

/* ECC atomic, DMA, SMP and interrupt safe scrub function */

static inline void atomic_scrub(void *va, u32 size)
{
	unsigned long *virt_addr = va;
	unsigned long temp;
	u32 i;

	for (i = 0; i < size / sizeof(unsigned long); i++, virt_addr++) {
		/*
		 * Very carefully read and write to memory atomically
		 * so we are interrupt, DMA and SMP safe.
		 *
		 * Intel: asm("lock; addl $0, %0"::"m"(*virt_addr));
		 */

		__asm__ __volatile__ (
		"	.set	mips3					\n"
		"1:	ll	%0, %1		# atomic_add		\n"
		"	addu	%0, $0					\n"
		"	sc	%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*virt_addr)
		: "m" (*virt_addr));
	}
}
#endif
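For context, a hedged sketch of how a caller in the EDAC core might use atomic_scrub() once a corrected error has been reported. The helper name and its parameters are hypothetical and not part of this patch; it assumes the affected page is a lowmem page with a permanent kernel mapping and that size is a multiple of sizeof(unsigned long), since atomic_scrub() walks the range one word at a time.

#include <linux/mm.h>
#include <asm/edac.h>

/* Hypothetical caller, for illustration only: rewrite "size" bytes of a
 * lowmem page in place after a corrected ECC error, so the memory
 * controller stores freshly computed check bits. */
static void scrub_corrected_block(struct page *page, unsigned long offset,
				  u32 size)
{
	void *virt = page_address(page);	/* valid for lowmem pages */

	if (virt)
		atomic_scrub(virt + offset, size);
}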