
Merge branch 'x86/uaccess' into core/percpu

Ingo Molnar 2009-02-10 00:40:48 +01:00
commit 5d96218b4a
9 changed files with 476 additions and 413 deletions
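
The x86/uaccess side of this merge converts the x86 signal code from long chains of `err |= __put_user(...)` / `err |= __get_user(...)` to the new {get|put}_user_try/catch primitives added in uaccess.h, backed by a per-thread uaccess_err flag and a new style of exception-table entry. The calling pattern used throughout the hunks below looks roughly like this (the surrounding variable names here are illustrative only):

        int err = 0;

        if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc)))
                return -EFAULT;

        put_user_try {
                put_user_ex(regs->ip, &sc->ip);         /* no per-access return value */
                put_user_ex(regs->flags, &sc->flags);
        } put_user_catch(err);                          /* faults surface here, in err */

        return err;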

@ -46,78 +46,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{ {
int err; int err = 0;
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT; return -EFAULT;
/* If you change siginfo_t structure, please make sure that put_user_try {
this code is fixed accordingly. /* If you change siginfo_t structure, please make sure that
It should never copy any pad contained in the structure this code is fixed accordingly.
to avoid security leaks, but must copy the generic It should never copy any pad contained in the structure
3 ints plus the relevant union member. */ to avoid security leaks, but must copy the generic
err = __put_user(from->si_signo, &to->si_signo); 3 ints plus the relevant union member. */
err |= __put_user(from->si_errno, &to->si_errno); put_user_ex(from->si_signo, &to->si_signo);
err |= __put_user((short)from->si_code, &to->si_code); put_user_ex(from->si_errno, &to->si_errno);
put_user_ex((short)from->si_code, &to->si_code);
if (from->si_code < 0) { if (from->si_code < 0) {
err |= __put_user(from->si_pid, &to->si_pid); put_user_ex(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid); put_user_ex(from->si_uid, &to->si_uid);
err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
} else { } else {
/* /*
* First 32bits of unions are always present: * First 32bits of unions are always present:
* si_pid === si_band === si_tid === si_addr(LS half) * si_pid === si_band === si_tid === si_addr(LS half)
*/ */
err |= __put_user(from->_sifields._pad[0], put_user_ex(from->_sifields._pad[0],
&to->_sifields._pad[0]); &to->_sifields._pad[0]);
switch (from->si_code >> 16) { switch (from->si_code >> 16) {
case __SI_FAULT >> 16: case __SI_FAULT >> 16:
break; break;
case __SI_CHLD >> 16: case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime); put_user_ex(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime); put_user_ex(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status); put_user_ex(from->si_status, &to->si_status);
/* FALL THROUGH */ /* FALL THROUGH */
default: default:
case __SI_KILL >> 16: case __SI_KILL >> 16:
err |= __put_user(from->si_uid, &to->si_uid); put_user_ex(from->si_uid, &to->si_uid);
break; break;
case __SI_POLL >> 16: case __SI_POLL >> 16:
err |= __put_user(from->si_fd, &to->si_fd); put_user_ex(from->si_fd, &to->si_fd);
break; break;
case __SI_TIMER >> 16: case __SI_TIMER >> 16:
err |= __put_user(from->si_overrun, &to->si_overrun); put_user_ex(from->si_overrun, &to->si_overrun);
err |= __put_user(ptr_to_compat(from->si_ptr), put_user_ex(ptr_to_compat(from->si_ptr),
&to->si_ptr); &to->si_ptr);
break; break;
/* This is not generated by the kernel as of now. */ /* This is not generated by the kernel as of now. */
case __SI_RT >> 16: case __SI_RT >> 16:
case __SI_MESGQ >> 16: case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid); put_user_ex(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int); put_user_ex(from->si_int, &to->si_int);
break; break;
}
} }
} } put_user_catch(err);
return err; return err;
} }
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{ {
int err; int err = 0;
u32 ptr32; u32 ptr32;
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
return -EFAULT; return -EFAULT;
err = __get_user(to->si_signo, &from->si_signo); get_user_try {
err |= __get_user(to->si_errno, &from->si_errno); get_user_ex(to->si_signo, &from->si_signo);
err |= __get_user(to->si_code, &from->si_code); get_user_ex(to->si_errno, &from->si_errno);
get_user_ex(to->si_code, &from->si_code);
err |= __get_user(to->si_pid, &from->si_pid); get_user_ex(to->si_pid, &from->si_pid);
err |= __get_user(to->si_uid, &from->si_uid); get_user_ex(to->si_uid, &from->si_uid);
err |= __get_user(ptr32, &from->si_ptr); get_user_ex(ptr32, &from->si_ptr);
to->si_ptr = compat_ptr(ptr32); to->si_ptr = compat_ptr(ptr32);
} get_user_catch(err);
return err; return err;
} }
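
For reference, the converted copy_siginfo_from_user32() comes out as follows (reassembled from the new side of the hunk above; close to, but not guaranteed to be byte-for-byte, the committed code):

        int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
        {
                int err = 0;
                u32 ptr32;

                if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
                        return -EFAULT;

                get_user_try {
                        get_user_ex(to->si_signo, &from->si_signo);
                        get_user_ex(to->si_errno, &from->si_errno);
                        get_user_ex(to->si_code, &from->si_code);
                        get_user_ex(to->si_pid, &from->si_pid);
                        get_user_ex(to->si_uid, &from->si_uid);
                        get_user_ex(ptr32, &from->si_ptr);
                        to->si_ptr = compat_ptr(ptr32);
                } get_user_catch(err);

                return err;
        }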
@ -142,17 +147,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
struct pt_regs *regs) struct pt_regs *regs)
{ {
stack_t uss, uoss; stack_t uss, uoss;
int ret; int ret, err = 0;
mm_segment_t seg; mm_segment_t seg;
if (uss_ptr) { if (uss_ptr) {
u32 ptr; u32 ptr;
memset(&uss, 0, sizeof(stack_t)); memset(&uss, 0, sizeof(stack_t));
if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) || if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
__get_user(ptr, &uss_ptr->ss_sp) || return -EFAULT;
__get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
__get_user(uss.ss_size, &uss_ptr->ss_size)) get_user_try {
get_user_ex(ptr, &uss_ptr->ss_sp);
get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
get_user_ex(uss.ss_size, &uss_ptr->ss_size);
} get_user_catch(err);
if (err)
return -EFAULT; return -EFAULT;
uss.ss_sp = compat_ptr(ptr); uss.ss_sp = compat_ptr(ptr);
} }
@ -161,10 +172,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp); ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
set_fs(seg); set_fs(seg);
if (ret >= 0 && uoss_ptr) { if (ret >= 0 && uoss_ptr) {
if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) || if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
__put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || return -EFAULT;
__put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
__put_user(uoss.ss_size, &uoss_ptr->ss_size)) put_user_try {
put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
} put_user_catch(err);
if (err)
ret = -EFAULT; ret = -EFAULT;
} }
return ret; return ret;
@ -174,18 +191,18 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
* Do a signal return; undo the signal stack. * Do a signal return; undo the signal stack.
*/ */
#define COPY(x) { \ #define COPY(x) { \
err |= __get_user(regs->x, &sc->x); \ get_user_ex(regs->x, &sc->x); \
} }
#define COPY_SEG_CPL3(seg) { \ #define COPY_SEG_CPL3(seg) { \
unsigned short tmp; \ unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \ get_user_ex(tmp, &sc->seg); \
regs->seg = tmp | 3; \ regs->seg = tmp | 3; \
} }
#define RELOAD_SEG(seg) { \ #define RELOAD_SEG(seg) { \
unsigned int cur, pre; \ unsigned int cur, pre; \
err |= __get_user(pre, &sc->seg); \ get_user_ex(pre, &sc->seg); \
savesegment(seg, cur); \ savesegment(seg, cur); \
pre |= 3; \ pre |= 3; \
if (pre != cur) \ if (pre != cur) \
@ -209,39 +226,42 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
sc, sc->err, sc->ip, sc->cs, sc->flags); sc, sc->err, sc->ip, sc->cs, sc->flags);
#endif #endif
/* get_user_try {
* Reload fs and gs if they have changed in the signal /*
* handler. This does not handle long fs/gs base changes in * Reload fs and gs if they have changed in the signal
* the handler, but does not clobber them at least in the * handler. This does not handle long fs/gs base changes in
* normal case. * the handler, but does not clobber them at least in the
*/ * normal case.
err |= __get_user(gs, &sc->gs); */
gs |= 3; get_user_ex(gs, &sc->gs);
savesegment(gs, oldgs); gs |= 3;
if (gs != oldgs) savesegment(gs, oldgs);
load_gs_index(gs); if (gs != oldgs)
load_gs_index(gs);
RELOAD_SEG(fs); RELOAD_SEG(fs);
RELOAD_SEG(ds); RELOAD_SEG(ds);
RELOAD_SEG(es); RELOAD_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip); COPY(dx); COPY(cx); COPY(ip);
/* Don't touch extended registers */ /* Don't touch extended registers */
COPY_SEG_CPL3(cs); COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss); COPY_SEG_CPL3(ss);
err |= __get_user(tmpflags, &sc->flags); get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
/* disable syscall checks */ /* disable syscall checks */
regs->orig_ax = -1; regs->orig_ax = -1;
err |= __get_user(tmp, &sc->fpstate); get_user_ex(tmp, &sc->fpstate);
buf = compat_ptr(tmp); buf = compat_ptr(tmp);
err |= restore_i387_xstate_ia32(buf); err |= restore_i387_xstate_ia32(buf);
get_user_ex(*pax, &sc->ax);
} get_user_catch(err);
err |= __get_user(*pax, &sc->ax);
return err; return err;
} }
@ -319,36 +339,38 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
{ {
int tmp, err = 0; int tmp, err = 0;
savesegment(gs, tmp); put_user_try {
err |= __put_user(tmp, (unsigned int __user *)&sc->gs); savesegment(gs, tmp);
savesegment(fs, tmp); put_user_ex(tmp, (unsigned int __user *)&sc->gs);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs); savesegment(fs, tmp);
savesegment(ds, tmp); put_user_ex(tmp, (unsigned int __user *)&sc->fs);
err |= __put_user(tmp, (unsigned int __user *)&sc->ds); savesegment(ds, tmp);
savesegment(es, tmp); put_user_ex(tmp, (unsigned int __user *)&sc->ds);
err |= __put_user(tmp, (unsigned int __user *)&sc->es); savesegment(es, tmp);
put_user_ex(tmp, (unsigned int __user *)&sc->es);
err |= __put_user(regs->di, &sc->di); put_user_ex(regs->di, &sc->di);
err |= __put_user(regs->si, &sc->si); put_user_ex(regs->si, &sc->si);
err |= __put_user(regs->bp, &sc->bp); put_user_ex(regs->bp, &sc->bp);
err |= __put_user(regs->sp, &sc->sp); put_user_ex(regs->sp, &sc->sp);
err |= __put_user(regs->bx, &sc->bx); put_user_ex(regs->bx, &sc->bx);
err |= __put_user(regs->dx, &sc->dx); put_user_ex(regs->dx, &sc->dx);
err |= __put_user(regs->cx, &sc->cx); put_user_ex(regs->cx, &sc->cx);
err |= __put_user(regs->ax, &sc->ax); put_user_ex(regs->ax, &sc->ax);
err |= __put_user(current->thread.trap_no, &sc->trapno); put_user_ex(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err); put_user_ex(current->thread.error_code, &sc->err);
err |= __put_user(regs->ip, &sc->ip); put_user_ex(regs->ip, &sc->ip);
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
err |= __put_user(regs->flags, &sc->flags); put_user_ex(regs->flags, &sc->flags);
err |= __put_user(regs->sp, &sc->sp_at_signal); put_user_ex(regs->sp, &sc->sp_at_signal);
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate); put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
/* non-iBCS2 extensions.. */ /* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask); put_user_ex(mask, &sc->oldmask);
err |= __put_user(current->thread.cr2, &sc->cr2); put_user_ex(current->thread.cr2, &sc->cr2);
} put_user_catch(err);
return err; return err;
} }
@ -437,13 +459,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
else else
restorer = &frame->retcode; restorer = &frame->retcode;
} }
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
/* put_user_try {
* These are actually not used anymore, but left because some put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
* gdb versions depend on them as a marker.
*/ /*
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode); * These are actually not used anymore, but left because some
* gdb versions depend on them as a marker.
*/
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
} put_user_catch(err);
if (err) if (err)
return -EFAULT; return -EFAULT;
@ -496,41 +522,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT; return -EFAULT;
err |= __put_user(sig, &frame->sig); put_user_try {
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); put_user_ex(sig, &frame->sig);
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
err |= copy_siginfo_to_user32(&frame->info, info); put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
if (err) err |= copy_siginfo_to_user32(&frame->info, info);
return -EFAULT;
/* Create the ucontext. */ /* Create the ucontext. */
if (cpu_has_xsave) if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else else
err |= __put_user(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link); put_user_ex(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp), put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags); &frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate, err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]); regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
if (ka->sa.sa_flags & SA_RESTORER) if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer; restorer = ka->sa.sa_restorer;
else else
restorer = VDSO32_SYMBOL(current->mm->context.vdso, restorer = VDSO32_SYMBOL(current->mm->context.vdso,
rt_sigreturn); rt_sigreturn);
err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
/*
* Not actually used anymore, but left because some gdb
* versions need it.
*/
put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
} put_user_catch(err);
/*
* Not actually used anymore, but left because some gdb
* versions need it.
*/
err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
if (err) if (err)
return -EFAULT; return -EFAULT;

@@ -1471,8 +1471,6 @@ u64 _paravirt_ident_64(u64);
 #define paravirt_nop	((void *)_paravirt_nop)
 
-void paravirt_use_bytelocks(void);
-
 #ifdef CONFIG_SMP
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)

@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure.  It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-	s8 lock;
-	s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	s8 val = 1;
-
-	asm("1: xchgb %1, %0\n"
-	    "   test %1,%1\n"
-	    "   jz 3f\n"
-	    "   " LOCK_PREFIX "incb %2\n"
-	    "2: rep;nop\n"
-	    "   cmpb $1, %0\n"
-	    "   je 2b\n"
-	    "   " LOCK_PREFIX "decb %2\n"
-	    "   jmp 1b\n"
-	    "3:"
-	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %1,%0"
-	    : "+m" (bl->lock), "+q" (old) : : "memory");
-	return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	smp_wmb();
-	bl->lock = 0;
-}
-
-#else  /* !CONFIG_PARAVIRT */
+#ifndef CONFIG_PARAVIRT
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
@@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif	/* CONFIG_PARAVIRT */
+#endif
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {

@@ -40,6 +40,7 @@ struct thread_info {
 	 */
 	__u8			supervisor_stack[0];
 #endif
+	int			uaccess_err;
 };
 
 #define INIT_THREAD_INFO(tsk)	\

@ -121,7 +121,7 @@ extern int __get_user_bad(void);
#define __get_user_x(size, ret, x, ptr) \ #define __get_user_x(size, ret, x, ptr) \
asm volatile("call __get_user_" #size \ asm volatile("call __get_user_" #size \
: "=a" (ret),"=d" (x) \ : "=a" (ret), "=d" (x) \
: "0" (ptr)) \ : "0" (ptr)) \
/* Careful: we have to cast the result to the type of the pointer /* Careful: we have to cast the result to the type of the pointer
@ -181,12 +181,12 @@ extern int __get_user_bad(void);
#define __put_user_x(size, x, ptr, __ret_pu) \ #define __put_user_x(size, x, ptr, __ret_pu) \
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
:"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
#define __put_user_u64(x, addr, err) \ #define __put_user_asm_u64(x, addr, err, errret) \
asm volatile("1: movl %%eax,0(%2)\n" \ asm volatile("1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \ "2: movl %%edx,4(%2)\n" \
"3:\n" \ "3:\n" \
@ -197,14 +197,24 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \ _ASM_EXTABLE(2b, 4b) \
: "=r" (err) \ : "=r" (err) \
: "A" (x), "r" (addr), "i" (-EFAULT), "0" (err)) : "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
asm volatile("1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
"3:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
_ASM_EXTABLE(2b, 3b - 2b) \
: : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \ #define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \ asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else #else
#define __put_user_u64(x, ptr, retval) \ #define __put_user_asm_u64(x, ptr, retval, errret) \
__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT) __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
#define __put_user_asm_ex_u64(x, addr) \
__put_user_asm_ex(x, addr, "q", "", "Zr")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif #endif
@ -276,10 +286,32 @@ do { \
__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \ __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
break; \ break; \
case 4: \ case 4: \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \ break; \
case 8: \ case 8: \
__put_user_u64((__typeof__(*ptr))(x), ptr, retval); \ __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
errret); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__put_user_asm_ex(x, ptr, "b", "b", "iq"); \
break; \
case 2: \
__put_user_asm_ex(x, ptr, "w", "w", "ir"); \
break; \
case 4: \
__put_user_asm_ex(x, ptr, "l", "k", "ir"); \
break; \
case 8: \
__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
break; \ break; \
default: \ default: \
__put_user_bad(); \ __put_user_bad(); \
@ -311,9 +343,12 @@ do { \
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad() #define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else #else
#define __get_user_asm_u64(x, ptr, retval, errret) \ #define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret) __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif #endif
#define __get_user_size(x, ptr, size, retval, errret) \ #define __get_user_size(x, ptr, size, retval, errret) \
@ -350,6 +385,33 @@ do { \
: "=r" (err), ltype(x) \ : "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err)) : "m" (__m(addr)), "i" (errret), "0" (err))
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
__get_user_asm_ex(x, ptr, "b", "b", "=q"); \
break; \
case 2: \
__get_user_asm_ex(x, ptr, "w", "w", "=r"); \
break; \
case 4: \
__get_user_asm_ex(x, ptr, "l", "k", "=r"); \
break; \
case 8: \
__get_user_asm_ex_u64(x, ptr); \
break; \
default: \
(x) = __get_user_bad(); \
} \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \ #define __put_user_nocheck(x, ptr, size) \
({ \ ({ \
int __pu_err; \ int __pu_err; \
@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
_ASM_EXTABLE(1b, 3b) \ _ASM_EXTABLE(1b, 3b) \
: "=r"(err) \ : "=r"(err) \
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %"rtype"0,%1\n" \
"2:\n" \
_ASM_EXTABLE(1b, 2b - 1b) \
: : ltype(x), "m" (__m(addr)))
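
Note how the new *_asm_ex variants differ from __get_user_asm()/__put_user_asm(): there is no error operand and no recovery stub; the exception-table entry stores the tiny forward offset `2b - 1b` (the size of the access instruction) instead of the address of a fixup label. For a 4-byte read, get_user_ex(x, ptr) therefore boils down to roughly the following (a sketch of the macro expansion, not verbatim output):

        unsigned long __gue_val;

        asm volatile("1:	movl %1,%k0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b - 1b)
                     : "=r" (__gue_val)
                     : "m" (*(const u32 __user *)ptr));
        x = (__typeof__(x))__gue_val;

On a fault, the fixup code (see the fixup_exception() change at the end of this diff) spots the sub-16 fixup value, records -EFAULT in thread_info->uaccess_err and resumes right after the mov; the enclosing get_user_catch() then folds that into the caller's err.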
/*
* uaccess_try and catch
*/
#define uaccess_try do { \
int prev_err = current_thread_info()->uaccess_err; \
current_thread_info()->uaccess_err = 0; \
barrier();
#define uaccess_catch(err) \
(err) |= current_thread_info()->uaccess_err; \
current_thread_info()->uaccess_err = prev_err; \
} while (0)
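
Spelled out, uaccess_try/uaccess_catch turn a put_user_try { ... } put_user_catch(err); region into a single do/while(0) statement; substituting the two macros above gives approximately:

        do {
                int prev_err = current_thread_info()->uaccess_err;

                current_thread_info()->uaccess_err = 0;
                barrier();

                /* ... body: get_user_ex()/put_user_ex() accesses ... */

                (err) |= current_thread_info()->uaccess_err;
                current_thread_info()->uaccess_err = prev_err;
        } while (0);

Any fault taken inside the body is reported exactly once, at the catch, and the previous uaccess_err value is restored so that an enclosing region does not lose its own pending error.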
/** /**
* __get_user: - Get a simple variable from user space, with less checking. * __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result. * @x: Variable to store result.
@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user(x, ptr) \ #define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr))) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/** /**
* __put_user: - Write a simple value into user space, with less checking. * __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space. * @x: Value to copy to user space.
@ -434,6 +517,45 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_unaligned __get_user #define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user #define __put_user_unaligned __put_user
/*
* {get|put}_user_try and catch
*
* get_user_try {
* get_user_ex(...);
* } get_user_catch(err)
*/
#define get_user_try uaccess_try
#define get_user_catch(err) uaccess_catch(err)
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr)))__gue_val; \
} while (0)
#ifdef CONFIG_X86_WP_WORKS_OK
#define put_user_try uaccess_try
#define put_user_catch(err) uaccess_catch(err)
#define put_user_ex(x, ptr) \
__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#else /* !CONFIG_X86_WP_WORKS_OK */
#define put_user_try do { \
int __uaccess_err = 0;
#define put_user_catch(err) \
(err) |= __uaccess_err; \
} while (0)
#define put_user_ex(x, ptr) do { \
__uaccess_err |= __put_user(x, ptr); \
} while (0)
#endif /* CONFIG_X86_WP_WORKS_OK */
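
One property of these primitives matters when reading the converted signal code below: a faulting get_user_ex()/put_user_ex() does not abort the block — the access is simply skipped (its destination is not reliably updated) and execution continues with the next statement — so results are only trustworthy after *_user_catch() has run and err has been checked. Hence the idiom the callers follow (illustrative names):

        get_user_try {
                get_user_ex(flags, &uact->sa_flags);    /* may fault and be skipped  */
                get_user_ex(mask, &uact->sa_mask);      /* still executed either way */
        } get_user_catch(err);

        if (err)                        /* the only place the fault is visible */
                return -EFAULT;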
/* /*
* movsl can be slow when source and dest are not both 8-byte aligned * movsl can be slow when source and dest are not both 8-byte aligned
*/ */

@@ -3466,40 +3466,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 	return 0;
 }
 
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
-{
-	unsigned int irq;
-	int ret;
-	unsigned int irq_want;
-
-	irq_want = nr_irqs_gsi;
-	irq = create_irq_nr(irq_want);
-	if (irq == 0)
-		return -1;
-
-#ifdef CONFIG_INTR_REMAP
-	if (!intr_remapping_enabled)
-		goto no_ir;
-
-	ret = msi_alloc_irte(dev, irq, 1);
-	if (ret < 0)
-		goto error;
-no_ir:
-#endif
-	ret = setup_msi_irq(dev, msidesc, irq);
-	if (ret < 0) {
-		destroy_irq(irq);
-		return ret;
-	}
-	return 0;
-
-#ifdef CONFIG_INTR_REMAP
-error:
-	destroy_irq(irq);
-	return ret;
-#endif
-}
-
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
 	unsigned int irq;

@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}

@ -51,24 +51,24 @@
#endif #endif
#define COPY(x) { \ #define COPY(x) { \
err |= __get_user(regs->x, &sc->x); \ get_user_ex(regs->x, &sc->x); \
} }
#define COPY_SEG(seg) { \ #define COPY_SEG(seg) { \
unsigned short tmp; \ unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \ get_user_ex(tmp, &sc->seg); \
regs->seg = tmp; \ regs->seg = tmp; \
} }
#define COPY_SEG_CPL3(seg) { \ #define COPY_SEG_CPL3(seg) { \
unsigned short tmp; \ unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \ get_user_ex(tmp, &sc->seg); \
regs->seg = tmp | 3; \ regs->seg = tmp | 3; \
} }
#define GET_SEG(seg) { \ #define GET_SEG(seg) { \
unsigned short tmp; \ unsigned short tmp; \
err |= __get_user(tmp, &sc->seg); \ get_user_ex(tmp, &sc->seg); \
loadsegment(seg, tmp); \ loadsegment(seg, tmp); \
} }
@ -83,45 +83,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
/* Always make any pending restarted system calls return -EINTR */ /* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall; current_thread_info()->restart_block.fn = do_no_restart_syscall;
get_user_try {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
GET_SEG(gs); GET_SEG(gs);
COPY_SEG(fs); COPY_SEG(fs);
COPY_SEG(es); COPY_SEG(es);
COPY_SEG(ds); COPY_SEG(ds);
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_32 */
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip); COPY(dx); COPY(cx); COPY(ip);
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
COPY(r8); COPY(r8);
COPY(r9); COPY(r9);
COPY(r10); COPY(r10);
COPY(r11); COPY(r11);
COPY(r12); COPY(r12);
COPY(r13); COPY(r13);
COPY(r14); COPY(r14);
COPY(r15); COPY(r15);
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs); COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss); COPY_SEG_CPL3(ss);
#else /* !CONFIG_X86_32 */ #else /* !CONFIG_X86_32 */
/* Kernel saves and restores only the CS segment register on signals, /* Kernel saves and restores only the CS segment register on signals,
* which is the bare minimum needed to allow mixed 32/64-bit code. * which is the bare minimum needed to allow mixed 32/64-bit code.
* App's signal handler can save/restore other segments if needed. */ * App's signal handler can save/restore other segments if needed. */
COPY_SEG_CPL3(cs); COPY_SEG_CPL3(cs);
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_32 */
err |= __get_user(tmpflags, &sc->flags); get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */ regs->orig_ax = -1; /* disable syscall checks */
err |= __get_user(buf, &sc->fpstate); get_user_ex(buf, &sc->fpstate);
err |= restore_i387_xstate(buf); err |= restore_i387_xstate(buf);
get_user_ex(*pax, &sc->ax);
} get_user_catch(err);
err |= __get_user(*pax, &sc->ax);
return err; return err;
} }
@ -131,57 +135,60 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
{ {
int err = 0; int err = 0;
#ifdef CONFIG_X86_32 put_user_try {
{
unsigned int tmp;
savesegment(gs, tmp); #ifdef CONFIG_X86_32
err |= __put_user(tmp, (unsigned int __user *)&sc->gs); {
} unsigned int tmp;
err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
err |= __put_user(regs->es, (unsigned int __user *)&sc->es); savesegment(gs, tmp);
err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds); put_user_ex(tmp, (unsigned int __user *)&sc->gs);
}
put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
put_user_ex(regs->es, (unsigned int __user *)&sc->es);
put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_32 */
err |= __put_user(regs->di, &sc->di); put_user_ex(regs->di, &sc->di);
err |= __put_user(regs->si, &sc->si); put_user_ex(regs->si, &sc->si);
err |= __put_user(regs->bp, &sc->bp); put_user_ex(regs->bp, &sc->bp);
err |= __put_user(regs->sp, &sc->sp); put_user_ex(regs->sp, &sc->sp);
err |= __put_user(regs->bx, &sc->bx); put_user_ex(regs->bx, &sc->bx);
err |= __put_user(regs->dx, &sc->dx); put_user_ex(regs->dx, &sc->dx);
err |= __put_user(regs->cx, &sc->cx); put_user_ex(regs->cx, &sc->cx);
err |= __put_user(regs->ax, &sc->ax); put_user_ex(regs->ax, &sc->ax);
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
err |= __put_user(regs->r8, &sc->r8); put_user_ex(regs->r8, &sc->r8);
err |= __put_user(regs->r9, &sc->r9); put_user_ex(regs->r9, &sc->r9);
err |= __put_user(regs->r10, &sc->r10); put_user_ex(regs->r10, &sc->r10);
err |= __put_user(regs->r11, &sc->r11); put_user_ex(regs->r11, &sc->r11);
err |= __put_user(regs->r12, &sc->r12); put_user_ex(regs->r12, &sc->r12);
err |= __put_user(regs->r13, &sc->r13); put_user_ex(regs->r13, &sc->r13);
err |= __put_user(regs->r14, &sc->r14); put_user_ex(regs->r14, &sc->r14);
err |= __put_user(regs->r15, &sc->r15); put_user_ex(regs->r15, &sc->r15);
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
err |= __put_user(current->thread.trap_no, &sc->trapno); put_user_ex(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err); put_user_ex(current->thread.error_code, &sc->err);
err |= __put_user(regs->ip, &sc->ip); put_user_ex(regs->ip, &sc->ip);
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs); put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
err |= __put_user(regs->flags, &sc->flags); put_user_ex(regs->flags, &sc->flags);
err |= __put_user(regs->sp, &sc->sp_at_signal); put_user_ex(regs->sp, &sc->sp_at_signal);
err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss); put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
#else /* !CONFIG_X86_32 */ #else /* !CONFIG_X86_32 */
err |= __put_user(regs->flags, &sc->flags); put_user_ex(regs->flags, &sc->flags);
err |= __put_user(regs->cs, &sc->cs); put_user_ex(regs->cs, &sc->cs);
err |= __put_user(0, &sc->gs); put_user_ex(0, &sc->gs);
err |= __put_user(0, &sc->fs); put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */ #endif /* CONFIG_X86_32 */
err |= __put_user(fpstate, &sc->fpstate); put_user_ex(fpstate, &sc->fpstate);
/* non-iBCS2 extensions.. */ /* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask); put_user_ex(mask, &sc->oldmask);
err |= __put_user(current->thread.cr2, &sc->cr2); put_user_ex(current->thread.cr2, &sc->cr2);
} put_user_catch(err);
return err; return err;
} }
@ -336,43 +343,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return -EFAULT; return -EFAULT;
err |= __put_user(sig, &frame->sig); put_user_try {
err |= __put_user(&frame->info, &frame->pinfo); put_user_ex(sig, &frame->sig);
err |= __put_user(&frame->uc, &frame->puc); put_user_ex(&frame->info, &frame->pinfo);
err |= copy_siginfo_to_user(&frame->info, info); put_user_ex(&frame->uc, &frame->puc);
if (err) err |= copy_siginfo_to_user(&frame->info, info);
return -EFAULT;
/* Create the ucontext. */ /* Create the ucontext. */
if (cpu_has_xsave) if (cpu_has_xsave)
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
else else
err |= __put_user(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link); put_user_ex(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->sp), put_user_ex(sas_ss_flags(regs->sp),
&frame->uc.uc_stack.ss_flags); &frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
regs, set->sig[0]); regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* Set up to return from userspace. */ /* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER) if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer; restorer = ka->sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode); put_user_ex(restorer, &frame->pretcode);
/* /*
* This is movl $__NR_rt_sigreturn, %ax ; int $0x80 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
* *
* WE DO NOT USE IT ANY MORE! It's only left here for historical * WE DO NOT USE IT ANY MORE! It's only left here for historical
* reasons and because gdb uses it as a signature to notice * reasons and because gdb uses it as a signature to notice
* signal handler stack frames. * signal handler stack frames.
*/ */
err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode); put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
} put_user_catch(err);
if (err) if (err)
return -EFAULT; return -EFAULT;
@ -436,28 +441,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return -EFAULT; return -EFAULT;
} }
/* Create the ucontext. */ put_user_try {
if (cpu_has_xsave) /* Create the ucontext. */
err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags); if (cpu_has_xsave)
else put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_flags); else
err |= __put_user(0, &frame->uc.uc_link); put_user_ex(0, &frame->uc.uc_flags);
err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); put_user_ex(0, &frame->uc.uc_link);
err |= __put_user(sas_ss_flags(regs->sp), put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
&frame->uc.uc_stack.ss_flags); put_user_ex(sas_ss_flags(regs->sp),
err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); &frame->uc.uc_stack.ss_flags);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]); put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. If provided, use a stub /* Set up to return from userspace. If provided, use a stub
already in userspace. */ already in userspace. */
/* x86-64 should always use SA_RESTORER. */ /* x86-64 should always use SA_RESTORER. */
if (ka->sa.sa_flags & SA_RESTORER) { if (ka->sa.sa_flags & SA_RESTORER) {
err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
} else { } else {
/* could use a vstub here */ /* could use a vstub here */
return -EFAULT; err |= -EFAULT;
} }
} put_user_catch(err);
if (err) if (err)
return -EFAULT; return -EFAULT;
@ -509,31 +516,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact) struct old_sigaction __user *oact)
{ {
struct k_sigaction new_ka, old_ka; struct k_sigaction new_ka, old_ka;
int ret; int ret = 0;
if (act) { if (act) {
old_sigset_t mask; old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) || if (!access_ok(VERIFY_READ, act, sizeof(*act)))
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT; return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags); get_user_try {
__get_user(mask, &act->sa_mask); get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
get_user_ex(mask, &act->sa_mask);
get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
} get_user_catch(ret);
if (ret)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask); siginitset(&new_ka.sa.sa_mask, mask);
} }
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) { if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT; return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags); put_user_try {
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
} put_user_catch(ret);
if (ret)
return -EFAULT;
} }
return ret; return ret;
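
sys_sigaction() ends up as a compact illustration of both halves of the new API in one syscall; reassembled from the new side of the hunk above, its body is approximately:

        struct k_sigaction new_ka, old_ka;
        int ret = 0;

        if (act) {
                old_sigset_t mask;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;

                get_user_try {
                        get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
                        get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
                        get_user_ex(mask, &act->sa_mask);
                        get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
                } get_user_catch(ret);

                if (ret)
                        return -EFAULT;
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;

                put_user_try {
                        put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
                        put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
                        put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
                        put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
                } put_user_catch(ret);

                if (ret)
                        return -EFAULT;
        }

        return ret;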

@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
 	fixup = search_exception_tables(regs->ip);
 	if (fixup) {
+		/* If fixup is less than 16, it means uaccess error */
+		if (fixup->fixup < 16) {
+			current_thread_info()->uaccess_err = -EFAULT;
+			regs->ip += fixup->fixup;
+			return 1;
+		}
 		regs->ip = fixup->fixup;
 		return 1;
 	}