
sh: __user annotations for __get/__put_user().

This adds some more __user annotations. These weren't being handled
properly in some of the __get_user() and __put_user() paths, so those
are tidied up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2007-05-14 12:52:56 +09:00 committed by Paul Mundt
parent 7a302a9674
commit e08f457c7c
8 changed files with 49 additions and 34 deletions
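
For context: __user is a sparse address-space annotation. It marks pointers that refer to user memory and must only be dereferenced through the checked accessors, so a sparse build (make C=1) can flag direct dereferences and kernel/user pointer mix-ups. A minimal sketch of the pattern the annotations below enforce (double_user_word() is a hypothetical helper, not part of this commit):

	#include <asm/uaccess.h>	/* get_user()/put_user() */
	#include <linux/errno.h>

	/* Hypothetical helper: read a word from user space, double it, and
	 * write it back.  The __user qualifier on the parameter is exactly
	 * the kind of annotation this commit adds to the sh paths below. */
	static int double_user_word(unsigned long __user *uptr)
	{
		unsigned long val;

		if (get_user(val, uptr))	/* checked read from user memory */
			return -EFAULT;

		return put_user(val * 2, uptr);	/* checked write back */
	}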

View File

@@ -17,6 +17,7 @@
 #include <linux/kexec.h>
 #include <linux/kdebug.h>
 #include <linux/tick.h>
+#include <linux/reboot.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -449,23 +450,20 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
 /*
 * sys_execve() executes a new program.
 */
-asmlinkage int sys_execve(char *ufilename, char **uargv,
-char **uenvp, unsigned long r7,
+asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
+char __user * __user *uenvp, unsigned long r7,
 struct pt_regs __regs)
 {
 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
 int error;
 char *filename;
-filename = getname((char __user *)ufilename);
+filename = getname(ufilename);
 error = PTR_ERR(filename);
 if (IS_ERR(filename))
 goto out;
-error = do_execve(filename,
-(char __user * __user *)uargv,
-(char __user * __user *)uenvp,
-regs);
+error = do_execve(filename, uargv, uenvp, regs);
 if (error == 0) {
 task_lock(current);
 current->ptrace &= ~PT_DTRACE;

View File

@@ -99,7 +99,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 ret = -EIO;
 if (copied != sizeof(tmp))
 break;
-ret = put_user(tmp,(unsigned long *) data);
+ret = put_user(tmp,(unsigned long __user *) data);
 break;
 }
@@ -128,7 +128,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 tmp = !!tsk_used_math(child);
 else
 tmp = 0;
-ret = put_user(tmp, (unsigned long *)data);
+ret = put_user(tmp, (unsigned long __user *)data);
 break;
 }
@@ -196,7 +196,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 case PTRACE_SINGLESTEP: { /* set the trap flag. */
 long pc;
-struct pt_regs *dummy = NULL;
+struct pt_regs *regs = NULL;
 ret = -EIO;
 if (!valid_signal(data))
@@ -207,7 +207,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 child->ptrace |= PT_DTRACE;
 }
-pc = get_stack_long(child, (long)&dummy->pc);
+pc = get_stack_long(child, (long)&regs->pc);
 /* Next scheduling will set up UBC */
 if (child->thread.ubc_pc == 0)

View File

@@ -261,14 +261,14 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
 goto badframe;
 /* It is more difficult to avoid calling this function than to
 call it and ignore errors. */
-do_sigaltstack(&st, NULL, regs->regs[15]);
+do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
 return r0;
 badframe:
 force_sig(SIGSEGV, current);
 return 0;
 }
 }
 /*
 * Set up a signal frame.

View File

@@ -581,7 +581,7 @@ uspace_segv:
 info.si_signo = SIGBUS;
 info.si_errno = 0;
 info.si_code = si_code;
-info.si_addr = (void *) address;
+info.si_addr = (void __user *)address;
 force_sig_info(SIGBUS, &info, current);
 } else {
 if (regs->pc & 1)

View File

@@ -60,6 +60,7 @@ extern void (*copy_page)(void *to, void *from);
 extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
+extern unsigned long memory_start, memory_end;
 #ifdef CONFIG_MMU
 extern void clear_page_slow(void *to);

View File

@@ -3,7 +3,5 @@
 #include <asm-generic/sections.h>
-extern char _end[];
 #endif /* __ASM_SH_SECTIONS_H */

View File

@@ -8,9 +8,13 @@
 #include <linux/irqflags.h>
 #include <linux/compiler.h>
 #include <linux/linkage.h>
+#include <asm/types.h>
+#include <asm/ptrace.h>
+struct task_struct *__switch_to(struct task_struct *prev,
+struct task_struct *next);
 /*
 * switch_to() should switch tasks to task nr n, first
 */
@@ -271,6 +275,16 @@ extern unsigned int instruction_size(unsigned int insn);
 void disable_hlt(void);
 void enable_hlt(void);
 void default_idle(void);
+asmlinkage void break_point_trap(void);
+asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
+unsigned long r6, unsigned long r7,
+struct pt_regs __regs);
+asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
+unsigned long r6, unsigned long r7,
+struct pt_regs __regs);
 #define arch_align_stack(x) (x)
 #endif

View File

@@ -61,8 +61,6 @@ static inline void set_fs(mm_segment_t s)
 */
 static inline int __access_ok(unsigned long addr, unsigned long size)
 {
-extern unsigned long memory_start, memory_end;
 return ((addr >= memory_start) && ((addr + size) < memory_end));
 }
 #else /* CONFIG_MMU */
@@ -76,7 +74,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
 * __access_ok: Check if address with size is OK or not.
 *
 * We do three checks:
-* (1) is it user space?
+* (1) is it user space?
 * (2) addr + size --> carry?
 * (3) addr + size >= 0x80000000 (PAGE_OFFSET)
 *
@@ -142,11 +140,12 @@ static inline int access_ok(int type, const void __user *p, unsigned long size)
 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
 struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct *)(x))
+#define __m(x) (*(struct __large_struct __user *)(x))
 #define __get_user_size(x,ptr,size,retval) \
 do { \
 retval = 0; \
+__chk_user_ptr(ptr); \
 switch (size) { \
 case 1: \
 __get_user_asm(x, ptr, retval, "b"); \
@@ -175,6 +174,7 @@ do { \
 #define __get_user_check(x,ptr,size) \
 ({ \
 long __gu_err, __gu_val; \
+__chk_user_ptr(ptr); \
 switch (size) { \
 case 1: \
 __get_user_1(__gu_val, (ptr), __gu_err); \
@@ -300,6 +300,7 @@ extern void __get_user_unknown(void);
 #define __put_user_size(x,ptr,size,retval) \
 do { \
 retval = 0; \
+__chk_user_ptr(ptr); \
 switch (size) { \
 case 1: \
 __put_user_asm(x, ptr, retval, "b"); \
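
The __chk_user_ptr() calls added in the hunks above are compile-time no-ops; their only purpose is to hand sparse a typed expression to check before the macro body casts the pointer away for the asm helpers. Under __CHECKER__ it is declared roughly as taking a (const volatile void __user *), so passing a plain kernel pointer into the macro gets reported at the call site. A hypothetical wrapper (fetch_user_long() is illustration only, not in the kernel) showing the idea:

	/* Illustration: keep the __user check alive inside a type-erasing
	 * macro by running the pointer through __chk_user_ptr() first. */
	#define fetch_user_long(x, ptr, err)				\
	do {								\
		__chk_user_ptr(ptr);					\
		__get_user_size((x), (ptr), sizeof(long), (err));	\
	} while (0)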
@@ -328,7 +329,7 @@ do { \
 #define __put_user_check(x,ptr,size) \
 ({ \
 long __pu_err = -EFAULT; \
-__typeof__(*(ptr)) *__pu_addr = (ptr); \
+__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
 \
 if (__access_ok((unsigned long)__pu_addr,size)) \
 __put_user_size((x),__pu_addr,(size),__pu_err); \
@@ -406,10 +407,10 @@ __asm__ __volatile__( \
 #endif
 extern void __put_user_unknown(void);
-/* Generic arbitrary sized copy. */
+/* Return the number of bytes NOT copied */
-extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
+__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
 #define copy_to_user(to,from,n) ({ \
 void *__copy_to = (void *) (to); \
@@ -420,14 +421,6 @@ __copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
 } else __copy_res = __copy_size; \
 __copy_res; })
-#define __copy_to_user(to,from,n) \
-__copy_user((void *)(to), \
-(void *)(from), n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
 #define copy_from_user(to,from,n) ({ \
 void *__copy_to = (void *) (to); \
 void *__copy_from = (void *) (from); \
@@ -438,9 +431,20 @@ __copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
 } else __copy_res = __copy_size; \
 __copy_res; })
-#define __copy_from_user(to,from,n) \
-__copy_user((void *)(to), \
-(void *)(from), n)
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+return __copy_user(to, (__force void *)from, n);
+}
+static __always_inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+return __copy_user((__force void *)to, from, n);
+}
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
 /*
 * Clear the area and return remaining number of bytes
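
The last hunk is the core of the tidy-up for the copy routines: the old __copy_from_user()/__copy_to_user() macros cast both arguments to plain void *, throwing away the address-space information, while the new static inline wrappers keep __user in the prototypes and confine the single (__force) cast to the wrapper body. A hypothetical caller, for illustration only, of what that buys at a call site:

	#include <asm/uaccess.h>	/* __copy_from_user(), as changed above */
	#include <linux/errno.h>

	/* fetch_from_user() is a made-up example.  With the old macro,
	 * accidentally swapping kbuf and ubuf here would still build
	 * silently; with the __user-annotated inline, a sparse run
	 * (make C=1) can warn about the mismatched address spaces. */
	static int fetch_from_user(void *kbuf, const void __user *ubuf,
				   unsigned long len)
	{
		if (__copy_from_user(kbuf, ubuf, len))
			return -EFAULT;		/* some bytes were not copied */
		return 0;
	}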