/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/linkage.h>
#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/cache.h>
/*
 * Copy to user space from a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
.macro ldrb1 reg, ptr, val
|
|
|
|
ldrb \reg, [\ptr], \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro strb1 reg, ptr, val
|
2020-12-02 21:15:54 +08:00
|
|
|
user_ldst 9998f, sttrb, \reg, \ptr, \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro ldrh1 reg, ptr, val
|
|
|
|
ldrh \reg, [\ptr], \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro strh1 reg, ptr, val
|
2021-07-12 22:27:46 +08:00
|
|
|
user_ldst 9997f, sttrh, \reg, \ptr, \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro ldr1 reg, ptr, val
|
|
|
|
ldr \reg, [\ptr], \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro str1 reg, ptr, val
|
2021-07-12 22:27:46 +08:00
|
|
|
user_ldst 9997f, sttr, \reg, \ptr, \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro ldp1 reg1, reg2, ptr, val
|
|
|
|
ldp \reg1, \reg2, [\ptr], \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
2020-04-30 02:37:02 +08:00
|
|
|
.macro stp1 reg1, reg2, ptr, val
|
2021-07-12 22:27:46 +08:00
|
|
|
user_stp 9997f, \reg1, \reg2, \ptr, \val
|
2015-09-24 02:55:39 +08:00
|
|
|
.endm
|
|
|
|
|
|
|
|
end .req x5
|
2021-07-12 22:27:46 +08:00
|
|
|
srcin .req x15
|
2020-01-07 03:58:17 +08:00
|
|
|
SYM_FUNC_START(__arch_copy_to_user)
|
2015-09-24 02:55:39 +08:00
|
|
|
add end, x0, x2
|
2021-07-12 22:27:46 +08:00
|
|
|
mov srcin, x1
|
2015-09-24 02:55:39 +08:00
|
|
|
#include "copy_template.S"
|
|
|
|
mov x0, #0
|
2012-03-05 19:49:32 +08:00
|
|
|
ret
|
|
|
|
|
arm64: lib: __arch_copy_to_user(): fold fixups into body
Like other functions, __arch_copy_to_user() places its exception fixups
in the `.fixup` section without any clear association with
__arch_copy_to_user() itself. If we backtrace the fixup code, it will be
symbolized as an offset from the nearest prior symbol, which happens to
be `__entry_tramp_text_end`. Further, since the PC adjustment for the
fixup is akin to a direct branch rather than a function call,
__arch_copy_to_user() itself will be missing from the backtrace.
This is confusing and hinders debugging. In general this pattern will
also be problematic for CONFIG_LIVEPATCH, since fixups often return to
their associated function, but this isn't accurately captured in the
stacktrace.
To solve these issues for assembly functions, we must move fixups into
the body of the functions themselves, after the usual fast-path returns.
This patch does so for __arch_copy_to_user().
Inline assembly will be dealt with in subsequent patches.
Other than the improved backtracing, there should be no functional
change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20211019160219.5202-4-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
2021-10-20 00:02:09 +08:00
|
|
|
// Exception fixups
|
2021-07-12 22:27:46 +08:00
|
|
|
9997: cmp dst, dstin
|
|
|
|
b.ne 9998f
|
|
|
|
// Before being absolutely sure we couldn't copy anything, try harder
|
|
|
|
ldrb tmp1w, [srcin]
|
|
|
|
USER(9998f, sttrb tmp1w, [dst])
|
|
|
|
add dst, dst, #1
|
2015-09-24 02:55:39 +08:00
|
|
|
9998: sub x0, end, dst // bytes not copied
|
2012-03-05 19:49:32 +08:00
|
|
|
ret
|
arm64: lib: __arch_copy_to_user(): fold fixups into body
Like other functions, __arch_copy_to_user() places its exception fixups
in the `.fixup` section without any clear association with
__arch_copy_to_user() itself. If we backtrace the fixup code, it will be
symbolized as an offset from the nearest prior symbol, which happens to
be `__entry_tramp_text_end`. Further, since the PC adjustment for the
fixup is akin to a direct branch rather than a function call,
__arch_copy_to_user() itself will be missing from the backtrace.
This is confusing and hinders debugging. In general this pattern will
also be problematic for CONFIG_LIVEPATCH, since fixups often return to
their associated function, but this isn't accurately captured in the
stacktrace.
To solve these issues for assembly functions, we must move fixups into
the body of the functions themselves, after the usual fast-path returns.
This patch does so for __arch_copy_to_user().
Inline assembly will be dealt with in subsequent patches.
Other than the improved backtracing, there should be no functional
change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20211019160219.5202-4-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
2021-10-20 00:02:09 +08:00
|
|
|
SYM_FUNC_END(__arch_copy_to_user)
|
|
|
|
EXPORT_SYMBOL(__arch_copy_to_user)
|