Mirror of https://mirrors.bfsu.edu.cn/git/linux.git
Commit d4fde568a3
Currently we have optimized hand-coded assembly checksum routines for big-endian 64-bit systems, but for little-endian we use the generic C routines. This modifies the optimized routines to work for little-endian. With this, we no longer need to enable CONFIG_GENERIC_CSUM. This also fixes a couple of comments in checksum_64.S so they accurately reflect what the associated instruction does.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
[mpe: Use the more common __BIG_ENDIAN__]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
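For context, the checksum routines the commit talks about compute the 16-bit one's-complement (Internet) checksum used by IP, TCP and UDP. The sketch below is a minimal plain-C illustration of that computation, in the spirit of the generic C routines that little-endian builds no longer fall back to; it is not the kernel's implementation, and the name csum16() is invented for this example. The hand-coded checksum_64.S gets its speed largely from summing whole 64-bit doublewords through the carry chain, which is where byte order becomes relevant.

/*
 * Illustrative sketch only: a plain-C 16-bit one's-complement (Internet)
 * checksum.  NOT the kernel's code; csum16() is a made-up name.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t csum16(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	/* Add the data as 16-bit big-endian words. */
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	/* An odd trailing byte is padded with zero. */
	if (len)
		sum += (uint32_t)p[0] << 8;

	/* Fold the carries back in (one's-complement addition). */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}

int main(void)
{
	static const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };

	printf("checksum: 0x%04x\n", csum16(data, sizeof(data)));
	return 0;
}

Any C compiler will build and run this as a userspace program. The kernel splits the same work differently: csum_partial() accumulates a running sum over a buffer and csum_fold() folds it down to the final 16-bit value.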
Makefile (36 lines, 948 B)
#
# Makefile for ppc-specific library files..
#

subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)

CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)

obj-y += string.o alloc.o crtsavres.o code-patching.o \
	 feature-fixups.o

obj-$(CONFIG_PPC32)	+= div64.o copy_32.o

obj64-y	+= copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
	   copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
	   memcpy_64.o memcmp_64.o

obj64-$(CONFIG_SMP)	+= locks.o
obj64-$(CONFIG_ALTIVEC)	+= vmx-helper.o

obj-y			+= checksum_$(BITS).o checksum_wrappers.o

obj-$(CONFIG_PPC_EMULATE_SSTEP)	+= sstep.o ldstfp.o

obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o

obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o

obj-$(CONFIG_ALTIVEC)	+= xor_vmx.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)

obj-$(CONFIG_PPC64) += $(obj64-y)