From efdc1e2083e04cc70721d55803889b346c1a3de2 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 28 Sep 2005 21:06:47 -0700
Subject: [PATCH] [SPARC64]: Simplify user fault fixup handling.

Instead of doing byte-at-a-time user accesses to figure
out where the fault occurred, read the saved fault_address
from the current thread structure.

For the sake of defensive programming, if the fault_address
does not fall into the user buffer range, simply assume the
whole area faulted.  This will cause the fixup for
copy_from_user() to clear the entire kernel side buffer.

Signed-off-by: David S. Miller
---
 arch/sparc64/lib/user_fixup.c | 65 ++++++++++++++++-------------------
 arch/sparc64/mm/fault.c       | 10 ++----
 include/asm-sparc64/uaccess.h |  6 ++--
 3 files changed, 35 insertions(+), 46 deletions(-)

diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
index 0278e34125db..19d1fdb17d0e 100644
--- a/arch/sparc64/lib/user_fixup.c
+++ b/arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
 
 /* Calculating the exact fault address when using
  * block loads and stores can be very complicated.
+ *
  * Instead of trying to be clever and handling all
  * of the cases, just fix things up simply here.
  */
 
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
+{
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long end = start + size;
+
+	if (fault_addr < start || fault_addr >= end) {
+		*offset = 0;
+	} else {
+		*offset = start - fault_addr;
+		size = end - fault_addr;
+	}
+	return size;
+}
+
 unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
 {
-	char *dst = to;
-	const char __user *src = from;
+	unsigned long offset;
 
-	while (size) {
-		if (__get_user(*dst, src))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
-
-	if (size)
-		memset(dst, 0, size);
+	size = compute_size((unsigned long) from, size, &offset);
+	if (likely(size))
+		memset(to + offset, 0, size);
 
 	return size;
 }
 
 unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
 {
-	char __user *dst = to;
-	const char *src = from;
+	unsigned long offset;
 
-	while (size) {
-		if (__put_user(*src, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
-
-	return size;
+	return compute_size((unsigned long) to, size, &offset);
 }
 
 unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
 {
-	char __user *dst = to;
-	char __user *src = from;
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long start = (unsigned long) to;
+	unsigned long end = start + size;
 
-	while (size) {
-		char tmp;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
-		if (__get_user(tmp, src))
-			break;
-		if (__put_user(tmp, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	start = (unsigned long) from;
+	end = start + size;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
 	return size;
 }
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 59dc9a2ece5a..4a52e79d515f 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -457,7 +457,7 @@ good_area:
 	}
 
 	up_read(&mm->mmap_sem);
-	goto fault_done;
+	return;
 
 	/*
 	 * Something tried to access memory that isn't in our memory map..
@@ -469,8 +469,7 @@ bad_area:
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
-
-	goto fault_done;
+	return;
 
 	/*
 	 * We ran out of memory, or some other thing happened to us that made
@@ -501,9 +500,4 @@ do_sigbus:
 	/* Kernel mode? Handle exceptions or die */
 	if (regs->tstate & TSTATE_PRIV)
 		goto handle_kernel_fault;
-
-fault_done:
-	/* These values are no longer needed, clear them. */
-	set_thread_fault_code(0);
-	current_thread_info()->fault_address = 0;
 }
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index bc8ddbb1cbed..203e8eee6351 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -251,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
@@ -267,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -283,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
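
As a side note for readers unfamiliar with the fixup scheme, the arithmetic the
commit message describes can be sketched outside the kernel.  The snippet below
is illustrative only: the function name bytes_not_copied and the sample
addresses are invented for the example and are not kernel API.  It shows how a
fault address saved inside [start, start + size) yields the residual byte count
end - fault_addr, and how a fault address outside that range is treated
defensively as if the whole area faulted.

#include <assert.h>
#include <stdio.h>

/* Illustrative userspace sketch, not kernel code: report how many
 * bytes of a buffer [start, start + size) should be considered
 * NOT copied, given the fault address saved at trap time.
 */
static unsigned long bytes_not_copied(unsigned long start,
				      unsigned long size,
				      unsigned long fault_addr)
{
	unsigned long end = start + size;

	/* Fault outside the buffer: defensively assume nothing was copied. */
	if (fault_addr < start || fault_addr >= end)
		return size;

	/* Otherwise only the tail from the fault onward is uncopied. */
	return end - fault_addr;
}

int main(void)
{
	unsigned long start = 0x1000, size = 256;

	/* Fault 100 bytes in: 156 bytes remain uncopied. */
	assert(bytes_not_copied(start, size, start + 100) == 156);

	/* Fault outside the buffer: the whole area counts as faulted. */
	assert(bytes_not_copied(start, size, 0x9000) == 256);

	printf("residual byte counts behave as described above\n");
	return 0;
}

The residual count is what copy_to_user()/copy_from_user() report back to their
callers; in the copy_from_user() case the fixup additionally memset()s the
uncopied tail of the kernel destination buffer, as noted in the commit message.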