commit bce5a1e8a3
When building ARCH=i386 with CONFIG_LTO_CLANG_FULL=y, it's possible
(depending on additional configs which I have not been able to isolate)
to observe a failure during register allocation:
error: inline assembly requires more registers than available
when memmove is inlined into tcp_v4_fill_cb() or tcp_v6_fill_cb().
memmove is quite large and probably shouldn't be inlined due to size
alone. A noinline function attribute would be the simplest fix, but
there are a few things that stand out with the current definition:
In addition to having complex constraints that can't always be resolved,
the clobber list seems to be missing %bx. And because the constraints use
numbered operands rather than symbolic operands, they are quite obnoxious to
refactor.
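
To illustrate that last point, here is a minimal sketch (a toy add operation,
not the kernel's memmove; all names are hypothetical) contrasting the two
operand styles in GNU C inline assembly:

/*
 * Toy example, not kernel code: the same operation written with numbered
 * and with symbolic operands.  Numbered operands ("%0", "%2") must be
 * recounted by hand whenever an operand is added or removed; symbolic
 * names keep each constraint next to a meaningful identifier.
 */
static inline unsigned long add_numbered(unsigned long a, unsigned long b)
{
	asm("add %2, %0" : "=r" (a) : "0" (a), "r" (b));
	return a;
}

static inline unsigned long add_symbolic(unsigned long a, unsigned long b)
{
	asm("add %[addend], %[sum]"
	    : [sum] "+r" (a)
	    : [addend] "r" (b));
	return a;
}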
Having a large function be 99% inline asm is a code smell suggesting that the
function should simply be written in stand-alone, out-of-line assembler.
Moving this to out of line assembler guarantees that the
compiler cannot inline calls to memmove.
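
For context, a minimal sketch of what the C side sees once the body lives in
its own .S file; the declaration below follows the usual x86 string-header
pattern and is an assumption here, not the literal patch:

/*
 * Sketch, assuming the usual arch string-header pattern: with the body in
 * a separate assembly file, the compiler only ever sees this prototype,
 * so there is nothing it can inline -- even under full LTO, which has no
 * IR for hand-written assembly.
 */
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *dest, const void *src, size_t n);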
This has been done previously for 64-bit:
commit 9599ec0471
("x86-64, mem: Convert memmove() to assembly file
and fix return value bug")
That gives the opportunity for other cleanups like fixing the
inconsistent use of tabs vs spaces and instruction suffixes, and the
label 3 appearing twice. Symbolic operands, local labels, and
additional comments would provide this code with a fresh coat of paint.
Finally, add a test that tickles the `rep movsl` implementation, checking it
for correctness since it has implicit operands.
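
A rough sketch of that kind of check (illustrative userspace code, not the
actual selftest added by the patch; buffer sizes and offsets are assumptions):

#include <string.h>
#include <assert.h>

#define BUF_LEN 256

/* Verify an overlapping move: dst must end up with the original src bytes. */
static void check_overlap(size_t dst_off, size_t src_off, size_t len)
{
	unsigned char buf[BUF_LEN], expect[BUF_LEN];
	size_t i;

	for (i = 0; i < BUF_LEN; i++)
		buf[i] = expect[i] = (unsigned char)i;
	for (i = 0; i < len; i++)
		expect[dst_off + i] = (unsigned char)(src_off + i);

	memmove(buf + dst_off, buf + src_off, len);
	assert(!memcmp(buf, expect, BUF_LEN));
}

int main(void)
{
	/*
	 * Overlap in both directions.  The real test would pick lengths
	 * large enough to reach the `rep movsl` path, whose threshold is
	 * an implementation detail of the 32-bit routine.
	 */
	check_overlap(0, 16, 128);
	check_overlap(16, 0, 128);
	return 0;
}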
Suggested-by: Ingo Molnar <mingo@kernel.org>
Suggested-by: David Laight <David.Laight@aculab.com>
Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Kees Cook <keescook@chromium.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/all/20221018172155.287409-1-ndesaulniers%40google.com
Makefile
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for x86 specific library files.
#

# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n

# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
KCSAN_SANITIZE_delay.o := n
ifdef CONFIG_KCSAN
# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
endif

# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o := n
KCSAN_SANITIZE_cmdline.o := n

ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
endif

CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif

inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
      cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@

$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
	$(call cmd,inat_tables)

$(obj)/inat.o: $(obj)/inat-tables.c

clean-files := inat-tables.c

obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o

lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
lib-y += pc-conf-reg.o
lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RETPOLINE) += retpoline.o

obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
obj-y += iomem.o

ifeq ($(CONFIG_X86_32),y)
        obj-y += atomic64_32.o
        lib-y += atomic64_cx8_32.o
        lib-y += checksum_32.o
        lib-y += strstr_32.o
        lib-y += string_32.o
        lib-y += memmove_32.o
ifneq ($(CONFIG_X86_CMPXCHG64),y)
        lib-y += cmpxchg8b_emu.o atomic64_386_32.o
endif
else
        obj-y += iomap_copy_64.o
ifneq ($(CONFIG_GENERIC_CSUM),y)
        lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
endif
        lib-y += clear_page_64.o copy_page_64.o
        lib-y += memmove_64.o memset_64.o
        lib-y += copy_user_64.o
        lib-y += cmpxchg16b_emu.o
endif