mirror of
https://github.com/edk2-porting/linux-next.git
synced 2025-01-08 21:53:54 +08:00
ab7c01fdc3
There are five MIPS32/64 architecture releases currently available: from 1 to 6, except the fourth one, which was intentionally skipped. Three of them can be called major: 1st, 2nd and 6th, which not only have some system-level alterations but also introduced significant core/ISA-level updates. The rest of the MIPS architecture releases are minor. Even though they don't have as many ISA/system/core-level changes as the major ones with respect to the previous releases, they still provide a set of updates (I'd say they were intended to be the intermediate releases before a major one) that might be useful for the kernel and user-level code, when activated by the kernel or compiler. In particular the following features were introduced or ended up being available at/after the MIPS32/64 Release 5 architecture: + the last release of the misaligned memory access instructions, + virtualisation - VZ ASE - is an optional component of the arch, + SIMD - MSA ASE - is an optional component of the arch, + DSP ASE is an optional component of the arch, + CP0.Status.FR=1 for CP1.FIR.F64=1 (pure 64-bit FPU general registers) must be available if an FPU is implemented, + CP1.FIR.Has2008 support is required so the CP1.FCSR.{ABS2008,NAN2008} bits are available, + UFR/UNFR aliases to access CP0.Status.FR from user-space by means of ctc1/cfc1 instructions (enabled by CP0.Config5.UFR), + CP0.Config5.LLB=1 and the eretnc instruction are implemented to avoid accidentally clearing the LL-bit when returning from an interrupt, exception, or error trap, + the XPA feature together with extended versions of the CPx registers is introduced, which requires the mfhc0/mthc0 instructions to be available. So due to these changes GNU GCC provides extended instruction set support for MIPS32/64 Release 5 by default, like eretnc/mfhc0/mthc0. Even though the architecture alteration isn't that big, it is still worth taking into account in the kernel software.
Finally, we can't deny that some optimizations/limitations might be found in the future and implemented on some level in the kernel or compiler. In that case, having even the intermediate MIPS architecture releases supported would be more than useful. So most of the changes provided by this commit can be split into either compile- or runtime-config related. The compile-time related changes are caused by adding the new CONFIG_CPU_MIPS32_R5/CONFIG_CPU_MIPSR5 configs and concern the code activating already implemented MIPSR2 or MIPSR6 features (like eretnc/LLbit, mthc0/mfhc0). In addition, CPU_HAS_MSA can now be freely enabled for MIPS32/64 Release 5 based platforms, as is done for CPU_MIPS32_R6 CPUs. The runtime changes concern the features which are handled with respect to the MIPS ISA revision detected at run-time by means of the CP0.Config.{AT,AR} bits. Alas, these fields can only be used to detect the r1, r2 or r6 releases. But since we know which CPUs in fact support the R5 arch, we can manually set the MIPS_CPU_ISA_M32R5/MIPS_CPU_ISA_M64R5 bit of c->isa_level and then use cpu_has_mips32r5/cpu_has_mips64r5 where appropriate. Since XPA/EVA provide too complex alterations, and to have them used with MIPS32 Release 2 charged kernels (for compatibility with current platform configs), they are left to be set up as separate kernel configs. Co-developed-by: Alexey Malahov <Alexey.Malahov@baikalelectronics.ru> Signed-off-by: Alexey Malahov <Alexey.Malahov@baikalelectronics.ru> Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Paul Burton <paulburton@kernel.org> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Rob Herring <robh+dt@kernel.org> Cc: devicetree@vger.kernel.org Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
423 lines
8.5 KiB
C
423 lines
8.5 KiB
C
/*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*
|
|
* Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
|
|
* Copyright (C) MIPS Technologies, Inc.
|
|
* written by Ralf Baechle <ralf@linux-mips.org>
|
|
*/
|
|
#ifndef _ASM_HAZARDS_H
|
|
#define _ASM_HAZARDS_H
|
|
|
|
#include <linux/stringify.h>
|
|
#include <asm/compiler.h>
|
|
|
|
#define ___ssnop \
|
|
sll $0, $0, 1
|
|
|
|
#define ___ehb \
|
|
sll $0, $0, 3
|
|
|
|
/*
|
|
* TLB hazards
|
|
*/
|
|
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
|
|
defined(CONFIG_CPU_MIPSR6)) && \
|
|
!defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_CPU_LOONGSON64)
|
|
|
|
/*
|
|
* MIPSR2 defines ehb for hazard avoidance
|
|
*/
|
|
|
|
#define __mtc0_tlbw_hazard \
|
|
___ehb
|
|
|
|
#define __mtc0_tlbr_hazard \
|
|
___ehb
|
|
|
|
#define __tlbw_use_hazard \
|
|
___ehb
|
|
|
|
#define __tlb_read_hazard \
|
|
___ehb
|
|
|
|
#define __tlb_probe_hazard \
|
|
___ehb
|
|
|
|
#define __irq_enable_hazard \
|
|
___ehb
|
|
|
|
#define __irq_disable_hazard \
|
|
___ehb
|
|
|
|
#define __back_to_back_c0_hazard \
|
|
___ehb
|
|
|
|
/*
|
|
* gcc has a tradition of misscompiling the previous construct using the
|
|
* address of a label as argument to inline assembler. Gas otoh has the
|
|
* annoying difference between la and dla which are only usable for 32-bit
|
|
* rsp. 64-bit code, so can't be used without conditional compilation.
|
|
* The alternative is switching the assembler to 64-bit code which happens
|
|
* to work right even for 32-bit code...
|
|
*/
|
|
#define instruction_hazard() \
|
|
do { \
|
|
unsigned long tmp; \
|
|
\
|
|
__asm__ __volatile__( \
|
|
" .set push \n" \
|
|
" .set "MIPS_ISA_LEVEL" \n" \
|
|
" dla %0, 1f \n" \
|
|
" jr.hb %0 \n" \
|
|
" .set pop \n" \
|
|
"1: \n" \
|
|
: "=r" (tmp)); \
|
|
} while (0)
|
|
|
|
#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
|
|
defined(CONFIG_CPU_BMIPS)
|
|
|
|
/*
|
|
* These are slightly complicated by the fact that we guarantee R1 kernels to
|
|
* run fine on R2 processors.
|
|
*/
|
|
|
|
#define __mtc0_tlbw_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __mtc0_tlbr_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __tlbw_use_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __tlb_read_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __tlb_probe_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __irq_enable_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __irq_disable_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
#define __back_to_back_c0_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ehb
|
|
|
|
/*
|
|
* gcc has a tradition of misscompiling the previous construct using the
|
|
* address of a label as argument to inline assembler. Gas otoh has the
|
|
* annoying difference between la and dla which are only usable for 32-bit
|
|
* rsp. 64-bit code, so can't be used without conditional compilation.
|
|
* The alternative is switching the assembler to 64-bit code which happens
|
|
* to work right even for 32-bit code...
|
|
*/
|
|
#define __instruction_hazard() \
|
|
do { \
|
|
unsigned long tmp; \
|
|
\
|
|
__asm__ __volatile__( \
|
|
" .set push \n" \
|
|
" .set mips64r2 \n" \
|
|
" dla %0, 1f \n" \
|
|
" jr.hb %0 \n" \
|
|
" .set pop \n" \
|
|
"1: \n" \
|
|
: "=r" (tmp)); \
|
|
} while (0)
|
|
|
|
#define instruction_hazard() \
|
|
do { \
|
|
if (cpu_has_mips_r2_r6) \
|
|
__instruction_hazard(); \
|
|
} while (0)
|
|
|
|
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
|
|
defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_CPU_LOONGSON64) || \
|
|
defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
|
|
|
|
/*
|
|
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
|
|
*/
|
|
|
|
#define __mtc0_tlbw_hazard
|
|
|
|
#define __mtc0_tlbr_hazard
|
|
|
|
#define __tlbw_use_hazard
|
|
|
|
#define __tlb_read_hazard
|
|
|
|
#define __tlb_probe_hazard
|
|
|
|
#define __irq_enable_hazard
|
|
|
|
#define __irq_disable_hazard
|
|
|
|
#define __back_to_back_c0_hazard
|
|
|
|
#define instruction_hazard() do { } while (0)
|
|
|
|
#elif defined(CONFIG_CPU_SB1)
|
|
|
|
/*
|
|
* Mostly like R4000 for historic reasons
|
|
*/
|
|
#define __mtc0_tlbw_hazard
|
|
|
|
#define __mtc0_tlbr_hazard
|
|
|
|
#define __tlbw_use_hazard
|
|
|
|
#define __tlb_read_hazard
|
|
|
|
#define __tlb_probe_hazard
|
|
|
|
#define __irq_enable_hazard
|
|
|
|
#define __irq_disable_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop
|
|
|
|
#define __back_to_back_c0_hazard
|
|
|
|
#define instruction_hazard() do { } while (0)
|
|
|
|
#else
|
|
|
|
/*
|
|
* Finally the catchall case for all other processors including R4000, R4400,
|
|
* R4600, R4700, R5000, RM7000, NEC VR41xx etc.
|
|
*
|
|
* The taken branch will result in a two cycle penalty for the two killed
|
|
* instructions on R4000 / R4400. Other processors only have a single cycle
|
|
* hazard so this is nice trick to have an optimal code for a range of
|
|
* processors.
|
|
*/
|
|
#define __mtc0_tlbw_hazard \
|
|
nop; \
|
|
nop
|
|
|
|
#define __mtc0_tlbr_hazard \
|
|
nop; \
|
|
nop
|
|
|
|
#define __tlbw_use_hazard \
|
|
nop; \
|
|
nop; \
|
|
nop
|
|
|
|
#define __tlb_read_hazard \
|
|
nop; \
|
|
nop; \
|
|
nop
|
|
|
|
#define __tlb_probe_hazard \
|
|
nop; \
|
|
nop; \
|
|
nop
|
|
|
|
#define __irq_enable_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop
|
|
|
|
#define __irq_disable_hazard \
|
|
nop; \
|
|
nop; \
|
|
nop
|
|
|
|
#define __back_to_back_c0_hazard \
|
|
___ssnop; \
|
|
___ssnop; \
|
|
___ssnop
|
|
|
|
#define instruction_hazard() do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
|
/* FPU hazards */
|
|
|
|
#if defined(CONFIG_CPU_SB1)
|
|
|
|
#define __enable_fpu_hazard \
|
|
.set push; \
|
|
.set mips64; \
|
|
.set noreorder; \
|
|
___ssnop; \
|
|
bnezl $0, .+4; \
|
|
___ssnop; \
|
|
.set pop
|
|
|
|
#define __disable_fpu_hazard
|
|
|
|
#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
|
|
defined(CONFIG_CPU_MIPSR6)
|
|
|
|
#define __enable_fpu_hazard \
|
|
___ehb
|
|
|
|
#define __disable_fpu_hazard \
|
|
___ehb
|
|
|
|
#else
|
|
|
|
#define __enable_fpu_hazard \
|
|
nop; \
|
|
nop; \
|
|
nop; \
|
|
nop
|
|
|
|
#define __disable_fpu_hazard \
|
|
___ehb
|
|
|
|
#endif
|
|
|
|
#ifdef __ASSEMBLY__
|
|
|
|
#define _ssnop ___ssnop
|
|
#define _ehb ___ehb
|
|
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
|
|
#define mtc0_tlbr_hazard __mtc0_tlbr_hazard
|
|
#define tlbw_use_hazard __tlbw_use_hazard
|
|
#define tlb_read_hazard __tlb_read_hazard
|
|
#define tlb_probe_hazard __tlb_probe_hazard
|
|
#define irq_enable_hazard __irq_enable_hazard
|
|
#define irq_disable_hazard __irq_disable_hazard
|
|
#define back_to_back_c0_hazard __back_to_back_c0_hazard
|
|
#define enable_fpu_hazard __enable_fpu_hazard
|
|
#define disable_fpu_hazard __disable_fpu_hazard
|
|
|
|
#else
|
|
|
|
#define _ssnop() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(___ssnop) \
|
|
); \
|
|
} while (0)
|
|
|
|
#define _ehb() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(___ehb) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define mtc0_tlbw_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__mtc0_tlbw_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define mtc0_tlbr_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__mtc0_tlbr_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define tlbw_use_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__tlbw_use_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define tlb_read_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__tlb_read_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define tlb_probe_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__tlb_probe_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define irq_enable_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__irq_enable_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define irq_disable_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__irq_disable_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define back_to_back_c0_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__back_to_back_c0_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define enable_fpu_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__enable_fpu_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
|
|
#define disable_fpu_hazard() \
|
|
do { \
|
|
__asm__ __volatile__( \
|
|
__stringify(__disable_fpu_hazard) \
|
|
); \
|
|
} while (0)
|
|
|
|
/*
|
|
* MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
|
|
*/
|
|
extern void mips_ihb(void);
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
#endif /* _ASM_HAZARDS_H */
|