/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
|
|
|
|
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>
|
|
__INIT
|
|
|
|
|
|
|
|
/*
|
|
|
|
* General exception vector for all other CPUs.
|
|
|
|
*
|
|
|
|
* Be careful when changing this, it has to be at most 128 bytes
|
|
|
|
* to fit into space reserved for the exception handler.
|
|
|
|
*/
|
|
|
|
NESTED(except_vec3_generic, 0, sp)
|
|
|
|
.set push
|
|
|
|
.set noat
|
|
|
|
#if R5432_CP0_INTERRUPT_WAR
|
|
|
|
mfc0 k0, CP0_INDEX
|
|
|
|
#endif
|
|
|
|
mfc0 k1, CP0_CAUSE
|
|
|
|
andi k1, k1, 0x7c
|
2005-09-04 06:56:16 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
2005-04-17 06:20:36 +08:00
|
|
|
dsll k1, k1, 1
|
|
|
|
#endif
|
|
|
|
PTR_L k0, exception_handlers(k1)
|
|
|
|
jr k0
|
|
|
|
.set pop
|
|
|
|
END(except_vec3_generic)
|
|
|
|
|
|
|
|
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2			# ExcCode 31 = VCED (data)
	andi	k1, k1, 0x7c			# k1 = ExcCode << 2
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2			# (delay slot) ExcCode 14 = VCEI
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1			# (delay slot) scale for 8-byte ptrs
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)			# invalidate primary D-line
	cache	Hit_Writeback_Inv_SD, (k0)		# write back + invalidate SD
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
|
|
|
|
|
2006-04-04 00:56:36 +08:00
|
|
|
__FINIT
|
|
|
|
|
2007-11-12 01:05:18 +08:00
|
|
|
.align 5 /* 32 byte rollback region */
|
2013-05-21 23:33:32 +08:00
|
|
|
LEAF(__r4k_wait)
|
2007-11-12 01:05:18 +08:00
|
|
|
.set push
|
|
|
|
.set noreorder
|
|
|
|
/* start of rollback region */
|
|
|
|
LONG_L t0, TI_FLAGS($28)
|
|
|
|
nop
|
|
|
|
andi t0, _TIF_NEED_RESCHED
|
|
|
|
bnez t0, 1f
|
|
|
|
nop
|
|
|
|
nop
|
|
|
|
nop
|
2013-03-26 01:15:55 +08:00
|
|
|
#ifdef CONFIG_CPU_MICROMIPS
|
|
|
|
nop
|
|
|
|
nop
|
|
|
|
nop
|
|
|
|
nop
|
|
|
|
#endif
|
2014-11-24 21:17:27 +08:00
|
|
|
.set MIPS_ISA_ARCH_LEVEL_RAW
|
2007-11-12 01:05:18 +08:00
|
|
|
wait
|
|
|
|
/* end of rollback region (the region size must be power of two) */
|
|
|
|
1:
|
|
|
|
jr ra
|
2016-04-30 00:29:29 +08:00
|
|
|
nop
|
2013-03-26 01:15:55 +08:00
|
|
|
.set pop
|
2013-05-21 23:33:32 +08:00
|
|
|
END(__r4k_wait)
|
2007-11-12 01:05:18 +08:00
|
|
|
|
|
|
|
.macro BUILD_ROLLBACK_PROLOGUE handler
|
|
|
|
FEXPORT(rollback_\handler)
|
|
|
|
.set push
|
|
|
|
.set noat
|
|
|
|
MFC0 k0, CP0_EPC
|
2013-05-21 23:33:32 +08:00
|
|
|
PTR_LA k1, __r4k_wait
|
2007-11-12 01:05:18 +08:00
|
|
|
ori k0, 0x1f /* 32 byte rollback region */
|
|
|
|
xori k0, 0x1f
|
2016-08-20 01:15:40 +08:00
|
|
|
bne k0, k1, \handler
|
2007-11-12 01:05:18 +08:00
|
|
|
MTC0 k0, CP0_EPC
|
|
|
|
.set pop
|
|
|
|
.endm
|
|
|
|
|
2013-01-22 19:59:30 +08:00
|
|
|
.align 5
|
2007-11-12 01:05:18 +08:00
|
|
|
BUILD_ROLLBACK_PROLOGUE handle_int
|
2006-04-04 00:56:36 +08:00
|
|
|
NESTED(handle_int, PT_SIZE, sp)
|
2017-08-11 02:27:39 +08:00
|
|
|
.cfi_signal_frame
|
2007-03-26 21:48:50 +08:00
|
|
|
#ifdef CONFIG_TRACE_IRQFLAGS
|
|
|
|
/*
|
|
|
|
* Check to see if the interrupted code has just disabled
|
|
|
|
* interrupts and ignore this interrupt for now if so.
|
|
|
|
*
|
|
|
|
* local_irq_disable() disables interrupts and then calls
|
|
|
|
* trace_hardirqs_off() to track the state. If an interrupt is taken
|
|
|
|
* after interrupts are disabled but before the state is updated
|
|
|
|
* it will appear to restore_all that it is incorrectly returning with
|
|
|
|
* interrupts disabled
|
|
|
|
*/
|
|
|
|
.set push
|
|
|
|
.set noat
|
|
|
|
mfc0 k0, CP0_STATUS
|
|
|
|
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
|
|
|
|
and k0, ST0_IEP
|
|
|
|
bnez k0, 1f
|
|
|
|
|
2007-11-07 00:08:48 +08:00
|
|
|
mfc0 k0, CP0_EPC
|
2007-03-26 21:48:50 +08:00
|
|
|
.set noreorder
|
|
|
|
j k0
|
2016-04-30 00:29:29 +08:00
|
|
|
rfe
|
2007-03-26 21:48:50 +08:00
|
|
|
#else
|
|
|
|
and k0, ST0_IE
|
|
|
|
bnez k0, 1f
|
|
|
|
|
|
|
|
eret
|
|
|
|
#endif
|
|
|
|
1:
|
|
|
|
.set pop
|
|
|
|
#endif
|
2017-08-11 02:27:39 +08:00
|
|
|
SAVE_ALL docfi=1
|
2006-04-04 00:56:36 +08:00
|
|
|
CLI
|
2006-07-07 21:07:18 +08:00
|
|
|
TRACE_IRQS_OFF
|
2006-04-04 00:56:36 +08:00
|
|
|
|
2006-10-08 02:44:33 +08:00
|
|
|
LONG_L s0, TI_REGS($28)
|
|
|
|
LONG_S sp, TI_REGS($28)
|
2016-12-19 22:20:59 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* SAVE_ALL ensures we are using a valid kernel stack for the thread.
|
|
|
|
* Check if we are already using the IRQ stack.
|
|
|
|
*/
|
|
|
|
move s1, sp # Preserve the sp
|
|
|
|
|
|
|
|
/* Get IRQ stack for this CPU */
|
|
|
|
ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
|
|
|
|
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
|
|
|
|
lui k1, %hi(irq_stack)
|
|
|
|
#else
|
|
|
|
lui k1, %highest(irq_stack)
|
|
|
|
daddiu k1, %higher(irq_stack)
|
|
|
|
dsll k1, 16
|
|
|
|
daddiu k1, %hi(irq_stack)
|
|
|
|
dsll k1, 16
|
|
|
|
#endif
|
|
|
|
LONG_SRL k0, SMP_CPUID_PTRSHIFT
|
|
|
|
LONG_ADDU k1, k0
|
|
|
|
LONG_L t0, %lo(irq_stack)(k1)
|
|
|
|
|
|
|
|
# Check if already on IRQ stack
|
|
|
|
PTR_LI t1, ~(_THREAD_SIZE-1)
|
|
|
|
and t1, t1, sp
|
|
|
|
beq t0, t1, 2f
|
|
|
|
|
|
|
|
/* Switch to IRQ stack */
|
2017-03-21 22:52:25 +08:00
|
|
|
li t1, _IRQ_STACK_START
|
2016-12-19 22:20:59 +08:00
|
|
|
PTR_ADD sp, t0, t1
|
|
|
|
|
2017-03-21 22:52:25 +08:00
|
|
|
/* Save task's sp on IRQ stack so that unwinding can follow it */
|
|
|
|
LONG_S s1, 0(sp)
|
2016-12-19 22:20:59 +08:00
|
|
|
2:
|
|
|
|
jal plat_irq_dispatch
|
|
|
|
|
|
|
|
/* Restore sp */
|
|
|
|
move sp, s1
|
|
|
|
|
|
|
|
j ret_from_irq
|
2013-03-26 01:15:55 +08:00
|
|
|
#ifdef CONFIG_CPU_MICROMIPS
|
|
|
|
nop
|
|
|
|
#endif
|
2006-04-04 00:56:36 +08:00
|
|
|
END(handle_int)
|
|
|
|
|
|
|
|
__INIT
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
|
|
|
|
* This is a dedicated interrupt exception vector which reduces the
|
|
|
|
* interrupt processing overhead. The jump instruction will be replaced
|
|
|
|
* at the initialization time.
|
|
|
|
*
|
|
|
|
* Be careful when changing this, it has to be at most 128 bytes
|
|
|
|
* to fit into space reserved for the exception handler.
|
|
|
|
*/
|
|
|
|
NESTED(except_vec4, 0, sp)
|
|
|
|
1: j 1b /* Dummy, will be replaced */
|
|
|
|
END(except_vec4)
|
|
|
|
|
|
|
|
/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)
|
|
|
|
|
|
|
|
__FINIT
|
|
|
|
|
2005-07-14 23:57:16 +08:00
|
|
|
/*
|
|
|
|
* Vectored interrupt handler.
|
|
|
|
* This prototype is copied to ebase + n*IntCtl.VS and patched
|
|
|
|
* to invoke the handler
|
|
|
|
*/
|
2007-11-12 01:05:18 +08:00
|
|
|
BUILD_ROLLBACK_PROLOGUE except_vec_vi
|
2005-07-14 23:57:16 +08:00
|
|
|
NESTED(except_vec_vi, 0, sp)
|
2017-08-11 02:27:39 +08:00
|
|
|
SAVE_SOME docfi=1
|
|
|
|
SAVE_AT docfi=1
|
2005-07-14 23:57:16 +08:00
|
|
|
.set push
|
|
|
|
.set noreorder
|
2013-03-26 01:15:55 +08:00
|
|
|
PTR_LA v1, except_vec_vi_handler
|
2007-03-19 23:29:39 +08:00
|
|
|
FEXPORT(except_vec_vi_lui)
|
2005-07-14 23:57:16 +08:00
|
|
|
lui v0, 0 /* Patched */
|
2013-03-26 01:15:55 +08:00
|
|
|
jr v1
|
2007-03-19 23:29:39 +08:00
|
|
|
FEXPORT(except_vec_vi_ori)
|
2005-07-14 23:57:16 +08:00
|
|
|
ori v0, 0 /* Patched */
|
|
|
|
.set pop
|
|
|
|
END(except_vec_vi)
|
|
|
|
EXPORT(except_vec_vi_end)
|
|
|
|
|
|
|
|
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# TRACE_IRQS_OFF may clobber v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0			# call the patched-in vector handler

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# stash k0 in the debug-save register
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer	# spill k1 (no second save register)
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret				# return from debug mode
	.set	pop
	END(ejtag_debug_handler)
|
|
|
|
|
|
|
|
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous
|
|
|
|
|
|
|
|
__INIT
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NMI debug exception handler for MIPS reference boards.
|
|
|
|
* The NMI debug exception entry point is 0xbfc00000, which
|
|
|
|
* normally is in the boot PROM, so the boot PROM must do a
|
|
|
|
* unconditional jump to this vector.
|
|
|
|
*/
|
|
|
|
NESTED(except_vec_nmi, 0, sp)
|
|
|
|
j nmi_handler
|
2013-03-26 01:15:55 +08:00
|
|
|
#ifdef CONFIG_CPU_MICROMIPS
|
|
|
|
nop
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
END(except_vec_nmi)
|
|
|
|
|
|
|
|
__FINIT
|
|
|
|
|
|
|
|
NESTED(nmi_handler, PT_SIZE, sp)
|
2017-08-11 02:27:39 +08:00
|
|
|
.cfi_signal_frame
|
2005-04-17 06:20:36 +08:00
|
|
|
.set push
|
|
|
|
.set noat
|
2013-10-08 19:39:31 +08:00
|
|
|
/*
|
|
|
|
* Clear ERL - restore segment mapping
|
|
|
|
* Clear BEV - required for page fault exception handler to work
|
|
|
|
*/
|
|
|
|
mfc0 k0, CP0_STATUS
|
2016-04-30 00:29:29 +08:00
|
|
|
ori k0, k0, ST0_EXL
|
2013-10-08 19:39:31 +08:00
|
|
|
li k1, ~(ST0_BEV | ST0_ERL)
|
2016-04-30 00:29:29 +08:00
|
|
|
and k0, k0, k1
|
|
|
|
mtc0 k0, CP0_STATUS
|
2013-10-08 19:39:31 +08:00
|
|
|
_ehb
|
2005-04-17 06:20:36 +08:00
|
|
|
SAVE_ALL
|
2013-01-22 19:59:30 +08:00
|
|
|
move a0, sp
|
2005-04-17 06:20:36 +08:00
|
|
|
jal nmi_exception_handler
|
2013-10-08 19:39:31 +08:00
|
|
|
/* nmi_exception_handler never returns */
|
2005-04-17 06:20:36 +08:00
|
|
|
.set pop
|
|
|
|
END(nmi_handler)
|
|
|
|
|
|
|
|
.macro __build_clear_none
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro __build_clear_sti
|
2006-07-07 21:07:18 +08:00
|
|
|
TRACE_IRQS_ON
|
2005-04-17 06:20:36 +08:00
|
|
|
STI
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro __build_clear_cli
|
|
|
|
CLI
|
2006-07-07 21:07:18 +08:00
|
|
|
TRACE_IRQS_OFF
|
2005-04-17 06:20:36 +08:00
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro __build_clear_fpe
|
2008-12-12 07:33:25 +08:00
|
|
|
.set push
|
|
|
|
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
|
|
|
|
.set mips1
|
2014-11-07 21:13:54 +08:00
|
|
|
SET_HARDFLOAT
|
2005-04-17 06:20:36 +08:00
|
|
|
cfc1 a1, fcr31
|
2008-12-12 07:33:25 +08:00
|
|
|
.set pop
|
MIPS: Clear [MSA]FPE CSR.Cause after notify_die()
When handling floating point exceptions (FPEs) and MSA FPEs the Cause
bits of the appropriate control and status register (FCSR for FPEs and
MSACSR for MSA FPEs) are read and cleared before enabling interrupts,
presumably so that it doesn't have to go through the pain of restoring
those bits if the process is pre-empted, since writing those bits would
cause another immediate exception while still in the kernel.
The bits aren't normally ever restored again, since userland never
expects to see them set.
However for virtualisation it is necessary for the kernel to be able to
restore these Cause bits, as the guest may have been interrupted in an
FP exception handler but before it could read the Cause bits. This can
be done by registering a die notifier, to get notified of the exception
when such a value is restored, and if the PC was at the instruction
which is used to restore the guest state, the handler can step over it
and continue execution. The Cause bits can then remain set without
causing further exceptions.
For this to work safely a few changes are made:
- __build_clear_fpe and __build_clear_msa_fpe no longer clear the Cause
bits, and now return from exception level with interrupts disabled
instead of enabled.
- do_fpe() now clears the Cause bits and enables interrupts after
notify_die() is called, so that the notifier can chose to return from
exception without this happening.
- do_msa_fpe() acts similarly, but now actually makes use of the second
argument (msacsr) and calls notify_die() with the new DIE_MSAFP,
allowing die notifiers to be informed of MSA FPEs too.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2014-12-02 21:44:13 +08:00
|
|
|
CLI
|
|
|
|
TRACE_IRQS_OFF
|
2005-04-17 06:20:36 +08:00
|
|
|
.endm
|
|
|
|
|
2015-01-30 20:09:34 +08:00
|
|
|
.macro __build_clear_msa_fpe
|
|
|
|
_cfcmsa a1, MSA_CSR
|
MIPS: Clear [MSA]FPE CSR.Cause after notify_die()
When handling floating point exceptions (FPEs) and MSA FPEs the Cause
bits of the appropriate control and status register (FCSR for FPEs and
MSACSR for MSA FPEs) are read and cleared before enabling interrupts,
presumably so that it doesn't have to go through the pain of restoring
those bits if the process is pre-empted, since writing those bits would
cause another immediate exception while still in the kernel.
The bits aren't normally ever restored again, since userland never
expects to see them set.
However for virtualisation it is necessary for the kernel to be able to
restore these Cause bits, as the guest may have been interrupted in an
FP exception handler but before it could read the Cause bits. This can
be done by registering a die notifier, to get notified of the exception
when such a value is restored, and if the PC was at the instruction
which is used to restore the guest state, the handler can step over it
and continue execution. The Cause bits can then remain set without
causing further exceptions.
For this to work safely a few changes are made:
- __build_clear_fpe and __build_clear_msa_fpe no longer clear the Cause
bits, and now return from exception level with interrupts disabled
instead of enabled.
- do_fpe() now clears the Cause bits and enables interrupts after
notify_die() is called, so that the notifier can chose to return from
exception without this happening.
- do_msa_fpe() acts similarly, but now actually makes use of the second
argument (msacsr) and calls notify_die() with the new DIE_MSAFP,
allowing die notifiers to be informed of MSA FPEs too.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
2014-12-02 21:44:13 +08:00
|
|
|
CLI
|
|
|
|
TRACE_IRQS_OFF
|
2015-01-30 20:09:34 +08:00
|
|
|
.endm
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
.macro __build_clear_ade
|
|
|
|
MFC0 t0, CP0_BADVADDR
|
|
|
|
PTR_S t0, PT_BVADDR(sp)
|
|
|
|
KMODE
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro __BUILD_silent exception
|
|
|
|
.endm
|
|
|
|
|
|
|
|
/* Gas tries to parse the PRINT argument as a string containing
|
|
|
|
string escapes and emits bogus warnings if it believes to
|
|
|
|
recognize an unknown escape code. So make the arguments
|
|
|
|
start with an n and gas will believe \n is ok ... */
|
2013-01-22 19:59:30 +08:00
|
|
|
.macro __BUILD_verbose nexception
|
2005-04-17 06:20:36 +08:00
|
|
|
LONG_L a1, PT_EPC(sp)
|
2005-09-04 06:56:22 +08:00
|
|
|
#ifdef CONFIG_32BIT
|
2005-04-17 06:20:36 +08:00
|
|
|
PRINT("Got \nexception at %08lx\012")
|
2005-09-04 06:56:17 +08:00
|
|
|
#endif
|
2005-09-04 06:56:22 +08:00
|
|
|
#ifdef CONFIG_64BIT
|
2005-04-17 06:20:36 +08:00
|
|
|
PRINT("Got \nexception at %016lx\012")
|
2005-09-04 06:56:17 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro __BUILD_count exception
|
|
|
|
LONG_L t0,exception_count_\exception
|
2016-04-30 00:29:29 +08:00
|
|
|
LONG_ADDIU t0, 1
|
2005-04-17 06:20:36 +08:00
|
|
|
LONG_S t0,exception_count_\exception
|
|
|
|
.comm exception_count\exception, 8, 8
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro __BUILD_HANDLER exception handler clear verbose ext
|
|
|
|
.align 5
|
|
|
|
NESTED(handle_\exception, PT_SIZE, sp)
|
2017-08-11 02:27:39 +08:00
|
|
|
.cfi_signal_frame
|
2005-04-17 06:20:36 +08:00
|
|
|
.set noat
|
|
|
|
SAVE_ALL
|
|
|
|
FEXPORT(handle_\exception\ext)
|
2015-08-18 17:25:50 +08:00
|
|
|
__build_clear_\clear
|
2005-04-17 06:20:36 +08:00
|
|
|
.set at
|
|
|
|
__BUILD_\verbose \exception
|
|
|
|
move a0, sp
|
2017-08-11 02:27:39 +08:00
|
|
|
jal do_\handler
|
|
|
|
j ret_from_exception
|
2005-04-17 06:20:36 +08:00
|
|
|
END(handle_\exception)
|
|
|
|
.endm
|
|
|
|
|
|
|
|
.macro BUILD_HANDLER exception handler clear verbose
|
2013-01-22 19:59:30 +08:00
|
|
|
__BUILD_HANDLER \exception \handler \clear \verbose _int
|
2005-04-17 06:20:36 +08:00
|
|
|
.endm
|
|
|
|
|
|
|
|
BUILD_HANDLER adel ade ade silent /* #4 */
|
|
|
|
BUILD_HANDLER ades ade ade silent /* #5 */
|
|
|
|
BUILD_HANDLER ibe be cli silent /* #6 */
|
|
|
|
BUILD_HANDLER dbe be cli silent /* #7 */
|
|
|
|
BUILD_HANDLER bp bp sti silent /* #9 */
|
|
|
|
BUILD_HANDLER ri ri sti silent /* #10 */
|
|
|
|
BUILD_HANDLER cpu cpu sti silent /* #11 */
|
|
|
|
BUILD_HANDLER ov ov sti silent /* #12 */
|
|
|
|
BUILD_HANDLER tr tr sti silent /* #13 */
|
2015-01-30 20:09:34 +08:00
|
|
|
BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
|
2005-04-17 06:20:36 +08:00
|
|
|
BUILD_HANDLER fpe fpe fpe silent /* #15 */
|
2013-11-15 00:12:31 +08:00
|
|
|
BUILD_HANDLER ftlb ftlb none silent /* #16 */
|
2014-01-27 23:23:11 +08:00
|
|
|
BUILD_HANDLER msa msa sti silent /* #21 */
|
2005-04-17 06:20:36 +08:00
|
|
|
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
|
2013-01-22 19:59:30 +08:00
|
|
|
#ifdef CONFIG_HARDWARE_WATCHPOINTS
|
2009-01-06 07:29:58 +08:00
|
|
|
/*
|
|
|
|
* For watch, interrupts will be enabled after the watch
|
|
|
|
* registers are read.
|
|
|
|
*/
|
|
|
|
BUILD_HANDLER watch watch cli silent /* #23 */
|
2008-09-23 15:08:45 +08:00
|
|
|
#else
|
2005-04-17 06:20:36 +08:00
|
|
|
BUILD_HANDLER watch watch sti verbose /* #23 */
|
2008-09-23 15:08:45 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
|
2006-06-30 21:19:45 +08:00
|
|
|
BUILD_HANDLER mt mt sti silent /* #25 */
|
2005-05-31 19:49:19 +08:00
|
|
|
BUILD_HANDLER dsp dsp sti silent /* #26 */
|
2005-04-17 06:20:36 +08:00
|
|
|
BUILD_HANDLER reserved reserved sti verbose /* others */
|
|
|
|
|
2006-09-11 16:50:29 +08:00
|
|
|
.align 5
|
MIPS: Check TLB before handle_ri_rdhwr() for Loongson-3
Loongson-3's micro TLB (ITLB) is not strictly a subset of JTLB. That
means: when a JTLB entry is replaced by hardware, there may be an old
valid entry exists in ITLB. So, a TLB miss exception may occur while
handle_ri_rdhwr() is running because it try to access EPC's content.
However, handle_ri_rdhwr() doesn't clear EXL, which makes a TLB Refill
exception be treated as a TLB Invalid exception and tlbp may fail. In
this case, if FTLB (which is usually set-associative instead of set-
associative) is enabled, a tlbp failure will cause an invalid tlbwi,
which will hang the whole system.
This patch rename handle_ri_rdhwr_vivt to handle_ri_rdhwr_tlbp and use
it for Loongson-3. It try to solve the same problem described as below,
but more straightforwards.
https://patchwork.linux-mips.org/patch/12591/
I think Loongson-2 has the same problem, but it has no FTLB, so we just
keep it as is.
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Cc: Rui Wang <wangr@lemote.com>
Cc: John Crispin <john@phrozen.org>
Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/15753/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-03-16 21:00:26 +08:00
|
|
|
LEAF(handle_ri_rdhwr_tlbp)
|
2006-09-11 16:50:29 +08:00
|
|
|
.set push
|
|
|
|
.set noat
|
|
|
|
.set noreorder
|
|
|
|
/* check if TLB contains a entry for EPC */
|
|
|
|
MFC0 k1, CP0_ENTRYHI
|
2016-05-06 21:36:24 +08:00
|
|
|
andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
|
2006-09-11 16:50:29 +08:00
|
|
|
MFC0 k0, CP0_EPC
|
2016-04-30 00:29:29 +08:00
|
|
|
PTR_SRL k0, _PAGE_SHIFT + 1
|
|
|
|
PTR_SLL k0, _PAGE_SHIFT + 1
|
2006-09-11 16:50:29 +08:00
|
|
|
or k1, k0
|
|
|
|
MTC0 k1, CP0_ENTRYHI
|
|
|
|
mtc0_tlbw_hazard
|
|
|
|
tlbp
|
|
|
|
tlb_probe_hazard
|
|
|
|
mfc0 k1, CP0_INDEX
|
|
|
|
.set pop
|
|
|
|
bltz k1, handle_ri /* slow path */
|
|
|
|
/* fall thru */
|
MIPS: Check TLB before handle_ri_rdhwr() for Loongson-3
Loongson-3's micro TLB (ITLB) is not strictly a subset of JTLB. That
means: when a JTLB entry is replaced by hardware, there may be an old
valid entry exists in ITLB. So, a TLB miss exception may occur while
handle_ri_rdhwr() is running because it try to access EPC's content.
However, handle_ri_rdhwr() doesn't clear EXL, which makes a TLB Refill
exception be treated as a TLB Invalid exception and tlbp may fail. In
this case, if FTLB (which is usually set-associative instead of set-
associative) is enabled, a tlbp failure will cause an invalid tlbwi,
which will hang the whole system.
This patch rename handle_ri_rdhwr_vivt to handle_ri_rdhwr_tlbp and use
it for Loongson-3. It try to solve the same problem described as below,
but more straightforwards.
https://patchwork.linux-mips.org/patch/12591/
I think Loongson-2 has the same problem, but it has no FTLB, so we just
keep it as is.
Signed-off-by: Huacai Chen <chenhc@lemote.com>
Cc: Rui Wang <wangr@lemote.com>
Cc: John Crispin <john@phrozen.org>
Cc: Steven J . Hill <Steven.Hill@caviumnetworks.com>
Cc: Fuxin Zhang <zhangfx@lemote.com>
Cc: Zhangjin Wu <wuzhangjin@gmail.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/15753/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2017-03-16 21:00:26 +08:00
|
|
|
END(handle_ri_rdhwr_tlbp)
|
2006-09-11 16:50:29 +08:00
|
|
|
|
|
|
|
/*
 * Fast path for the RI exception raised by `rdhwr v1,$29` (userland TLS
 * pointer read) on CPUs without hardware RDHWR: emulate the instruction
 * in the handler, otherwise fall back to the generic handle_ri.
 */
LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1		# EPC bit 0 set => microMIPS mode
	beqz	k0, 1f
	 xor	k1, k0			# (delay slot) clear ISA bit from EPC
	lhu	k0, (k1)		# fetch 32-bit insn as two halfwords
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d		# expected microMIPS encoding
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03		# expected MIPS32 encoding
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri		# odd EPC can't be a MIPS32 insn
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK	# k1 = thread_info base
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = TLS pointer (the rdhwr result)
	LONG_ADDIU	k0, 4		# skip the emulated instruction
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)
|
|
|
|
|
2005-09-04 06:56:16 +08:00
|
|
|
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif