2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-26 06:04:14 +08:00

[POWERPC] Alignment exception uses __get/put_user_inatomic

Make the alignment exception handler use the new _inatomic variants
of __get/put_user. This fixes erroneous warnings in the very rare
cases where we manage to have copy_tofrom_user_inatomic() trigger
an alignment exception.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

 arch/powerpc/kernel/align.c |   56 ++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 25 deletions(-)
Signed-off-by: Paul Mackerras <paulus@samba.org>
This commit is contained in:
Benjamin Herrenschmidt 2007-04-11 16:13:19 +10:00 committed by Paul Mackerras
parent e68c825bb0
commit e4ee3891db

View File

@@ -241,7 +241,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size)) if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
return -EFAULT; return -EFAULT;
for (i = 0; i < size / sizeof(long); ++i) for (i = 0; i < size / sizeof(long); ++i)
if (__put_user(0, p+i)) if (__put_user_inatomic(0, p+i))
return -EFAULT; return -EFAULT;
return 1; return 1;
} }
@@ -288,7 +288,8 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
} else { } else {
unsigned long pc = regs->nip ^ (swiz & 4); unsigned long pc = regs->nip ^ (swiz & 4);
if (__get_user(instr, (unsigned int __user *)pc)) if (__get_user_inatomic(instr,
(unsigned int __user *)pc))
return -EFAULT; return -EFAULT;
if (swiz == 0 && (flags & SW)) if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr); instr = cpu_to_le32(instr);
@@ -324,26 +325,30 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
((nb0 + 3) / 4) * sizeof(unsigned long)); ((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i, ++p) for (i = 0; i < nb; ++i, ++p)
if (__get_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p))) if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
if (nb0 > 0) { if (nb0 > 0) {
rptr = &regs->gpr[0]; rptr = &regs->gpr[0];
addr += nb; addr += nb;
for (i = 0; i < nb0; ++i, ++p) for (i = 0; i < nb0; ++i, ++p)
if (__get_user(REG_BYTE(rptr, i ^ bswiz), if (__get_user_inatomic(REG_BYTE(rptr,
i ^ bswiz),
SWIZ_PTR(p))) SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
} }
} else { } else {
for (i = 0; i < nb; ++i, ++p) for (i = 0; i < nb; ++i, ++p)
if (__put_user(REG_BYTE(rptr, i ^ bswiz), SWIZ_PTR(p))) if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
if (nb0 > 0) { if (nb0 > 0) {
rptr = &regs->gpr[0]; rptr = &regs->gpr[0];
addr += nb; addr += nb;
for (i = 0; i < nb0; ++i, ++p) for (i = 0; i < nb0; ++i, ++p)
if (__put_user(REG_BYTE(rptr, i ^ bswiz), if (__put_user_inatomic(REG_BYTE(rptr,
i ^ bswiz),
SWIZ_PTR(p))) SWIZ_PTR(p)))
return -EFAULT; return -EFAULT;
} }
@@ -398,7 +403,8 @@ int fix_alignment(struct pt_regs *regs)
if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE)) if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
pc ^= 4; pc ^= 4;
if (unlikely(__get_user(instr, (unsigned int __user *)pc))) if (unlikely(__get_user_inatomic(instr,
(unsigned int __user *)pc)))
return -EFAULT; return -EFAULT;
if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE)) if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
instr = cpu_to_le32(instr); instr = cpu_to_le32(instr);
@@ -474,16 +480,16 @@ int fix_alignment(struct pt_regs *regs)
p = (unsigned long) addr; p = (unsigned long) addr;
switch (nb) { switch (nb) {
case 8: case 8:
ret |= __get_user(data.v[0], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
ret |= __get_user(data.v[1], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
ret |= __get_user(data.v[2], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
ret |= __get_user(data.v[3], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
case 4: case 4:
ret |= __get_user(data.v[4], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
ret |= __get_user(data.v[5], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
case 2: case 2:
ret |= __get_user(data.v[6], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
ret |= __get_user(data.v[7], SWIZ_PTR(p++)); ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
if (unlikely(ret)) if (unlikely(ret))
return -EFAULT; return -EFAULT;
} }
@@ -551,16 +557,16 @@ int fix_alignment(struct pt_regs *regs)
p = (unsigned long) addr; p = (unsigned long) addr;
switch (nb) { switch (nb) {
case 8: case 8:
ret |= __put_user(data.v[0], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
ret |= __put_user(data.v[1], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
ret |= __put_user(data.v[2], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
ret |= __put_user(data.v[3], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
case 4: case 4:
ret |= __put_user(data.v[4], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
ret |= __put_user(data.v[5], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
case 2: case 2:
ret |= __put_user(data.v[6], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
ret |= __put_user(data.v[7], SWIZ_PTR(p++)); ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
} }
if (unlikely(ret)) if (unlikely(ret))
return -EFAULT; return -EFAULT;