mirror of https://github.com/edk2-porting/linux-next.git
synced 2024-12-25 21:54:06 +08:00
446c92b290
This is a fix for the following crash observed in 2.6.29-rc3:
http://lkml.org/lkml/2009/1/29/150

On ARM it doesn't make sense to trace a naked function because then
mcount is called without stack and frame pointer being set up and there
is no chance to restore the lr register to the value before mcount was
called.

Reported-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Tested-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Cc: Abhishek Sagar <sagar.abhishek@gmail.com>
Cc: Steven Rostedt <rostedt@home.goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
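The shape of the fix described above is to make __naked imply notrace, so the compiler never emits an mcount call into functions such as xsc3_mc_copy_user_page() below. A minimal sketch, assuming the macro lives in include/linux/compiler-gcc.h as it did in kernels of that era; the exact hunk in the commit may differ:

/* Sketch, not the verbatim commit: naked functions have no prologue,
 * so ftrace's mcount hook must never be inserted into them.  Making
 * __naked imply notrace drops the -pg instrumentation for every
 * __naked function. */
#define __naked __attribute__((naked)) notrace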
114 lines
2.8 KiB
C
/*
 *  linux/arch/arm/mm/copypage-xsc3.S
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */
#include <linux/init.h>
#include <linux/highmem.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

/*
 * XSC3 optimised copy_user_highpage
 *  r0 = destination
 *  r1 = source
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 */
static void __naked
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	asm("\
	stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %0				\n\
						\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
						\n\
2:	ldrd	r2, [r1], #8			\n\
	mov	ip, r0				\n\
	ldrd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
	strd	r2, [r0], #8			\n\
	ldrd	r2, [r1], #8			\n\
	strd	r4, [r0], #8			\n\
	ldrd	r4, [r1], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r4, [r0], #8			\n\
	ldrd	r2, [r1], #8			\n\
	mov	ip, r0				\n\
	ldrd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
	strd	r2, [r0], #8			\n\
	ldrd	r2, [r1], #8			\n\
	subs	lr, lr, #1			\n\
	strd	r4, [r0], #8			\n\
	ldrd	r4, [r1], #8			\n\
	strd	r2, [r0], #8			\n\
	strd	r4, [r0], #8			\n\
	bgt	1b				\n\
	beq	2b				\n\
						\n\
	ldmfd	sp!, {r4, r5, pc}"
	:
	: "I" (PAGE_SIZE / 64 - 1));
}

void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	xsc3_mc_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

/*
 * XScale optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 */
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
	asm volatile ("\
	mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	subs	r1, r1, #1			\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
	kunmap_atomic(kaddr, KM_USER0);
}
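The clear loop follows the same pattern, again assuming 4 KiB pages:

/*
 * Clear-loop accounting, assuming PAGE_SIZE == 4096:
 *   r1 starts at PAGE_SIZE / 32 = 128.  Each pass invalidates one
 *   32-byte destination line (so zeroing never triggers a fill from
 *   RAM) and then stores 32 bytes of zeros from the r2/r3 pair via
 *   four strd ops: 128 passes * 32 bytes = 4096 bytes.
 */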

struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
};
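This descriptor is how the XSC3-specific routines get wired into the generic highmem helpers: the boot code picks the matching cpu_user_fns from the processor info, and clear_user_highpage()/copy_user_highpage() dispatch through it. A condensed sketch of the MULTI_USER dispatch, loosely based on arch/arm/include/asm/page.h of the same era (a hedged reconstruction, not a verbatim copy of the header):

/* Sketch of the multi-CPU dispatch in arch/arm/include/asm/page.h;
 * names should be checked against the real header of that kernel. */
struct cpu_user_fns {
	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr);
};

extern struct cpu_user_fns cpu_user;	/* set at boot from the proc_info entry */

#define clear_user_highpage(page, vaddr)	\
	cpu_user.cpu_clear_user_highpage(page, vaddr)
#define copy_user_highpage(to, from, vaddr, vma)	\
	cpu_user.cpu_copy_user_highpage(to, from, vaddr)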