1708644f19
Prevents crashes when IRQs arrive while the current kernel stack context
already contains deeply nested function calls, e.g. when stacking lots of
network devices on top of each other.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
From: Matt Redfearn <matt.redfearn@imgtec.com>
Date: Mon, 19 Dec 2016 14:20:59 +0000
Subject: [PATCH] MIPS: Switch to the irq_stack in interrupts

When entering interrupt context via handle_int or except_vec_vi, switch
to the irq_stack of the current CPU if it is not already in use.

The current stack pointer is masked with the thread size and compared to
the base of the irq stack. If it does not match then the stack pointer
is set to the top of that stack; otherwise this is a nested irq being
handled on the irq stack, so the stack pointer should be left as it was.
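
For illustration only (this note is not part of the original commit message),
the check above corresponds roughly to the following C sketch, using the names
that appear in the assembly below (irq_stack, _THREAD_SIZE, _IRQ_STACK_SIZE)
rather than any particular C header:

	/* Per-CPU table of IRQ stack base pointers, set up elsewhere. */
	extern void *irq_stack[NR_CPUS];

	static unsigned long select_irq_stack(unsigned long sp)
	{
		unsigned long base = (unsigned long)irq_stack[smp_processor_id()];

		/* Mask sp down to the base of the stack it currently points into. */
		if ((sp & ~(_THREAD_SIZE - 1)) != base)
			return base + _IRQ_STACK_SIZE;	/* switch to the top of the IRQ stack */

		return sp;	/* nested irq: already on the IRQ stack, leave sp unchanged */
	}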

The in-use stack pointer is placed in the callee-saved register s1. It
will be saved to the stack when plat_irq_dispatch is invoked and can be
restored once control returns here.
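
Again for illustration only (not part of the original commit message), the
overall sequence maps onto C roughly as follows; select_irq_stack() is the
hypothetical helper sketched above, and "sp" stands for the stack pointer
register, which C cannot manipulate directly:

	extern void plat_irq_dispatch(void);

	static unsigned long dispatch_on_irq_stack(unsigned long sp)
	{
		unsigned long saved_sp = sp;	/* "move s1, sp": old sp kept in a
						   callee-saved register */

		sp = select_irq_stack(sp);	/* pick the stack for this interrupt */
		(void)sp;			/* the assembly installs this value as $sp */

		plat_irq_dispatch();		/* "jal plat_irq_dispatch"; the calling
						   convention preserves s1 */

		return saved_sp;		/* "move sp, s1" before "j ret_from_irq" */
	}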

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
---

--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -188,9 +188,44 @@ NESTED(handle_int, PT_SIZE, sp)

 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
-	PTR_LA	ra, ret_from_irq
-	PTR_LA	v0, plat_irq_dispatch
-	jr	v0
+
+	/*
+	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+	 * Check if we are already using the IRQ stack.
+	 */
+	move	s1, sp	# Preserve the sp
+
+	/* Get IRQ stack for this CPU */
+	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+	lui	k1, %hi(irq_stack)
+#else
+	lui	k1, %highest(irq_stack)
+	daddiu	k1, %higher(irq_stack)
+	dsll	k1, 16
+	daddiu	k1, %hi(irq_stack)
+	dsll	k1, 16
+#endif
+	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
+	LONG_ADDU	k1, k0
+	LONG_L	t0, %lo(irq_stack)(k1)
+
+	# Check if already on IRQ stack
+	PTR_LI	t1, ~(_THREAD_SIZE-1)
+	and	t1, t1, sp
+	beq	t0, t1, 2f
+
+	/* Switch to IRQ stack */
+	li	t1, _IRQ_STACK_SIZE
+	PTR_ADD	sp, t0, t1
+
+2:
+	jal	plat_irq_dispatch
+
+	/* Restore sp */
+	move	sp, s1
+
+	j	ret_from_irq
 #ifdef CONFIG_CPU_MICROMIPS
 	nop
 #endif
@@ -263,8 +298,44 @@ NESTED(except_vec_vi_handler, 0, sp)

 	LONG_L	s0, TI_REGS($28)
 	LONG_S	sp, TI_REGS($28)
-	PTR_LA	ra, ret_from_irq
-	jr	v0
+
+	/*
+	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+	 * Check if we are already using the IRQ stack.
+	 */
+	move	s1, sp	# Preserve the sp
+
+	/* Get IRQ stack for this CPU */
+	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+	lui	k1, %hi(irq_stack)
+#else
+	lui	k1, %highest(irq_stack)
+	daddiu	k1, %higher(irq_stack)
+	dsll	k1, 16
+	daddiu	k1, %hi(irq_stack)
+	dsll	k1, 16
+#endif
+	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
+	LONG_ADDU	k1, k0
+	LONG_L	t0, %lo(irq_stack)(k1)
+
+	# Check if already on IRQ stack
+	PTR_LI	t1, ~(_THREAD_SIZE-1)
+	and	t1, t1, sp
+	beq	t0, t1, 2f
+
+	/* Switch to IRQ stack */
+	li	t1, _IRQ_STACK_SIZE
+	PTR_ADD	sp, t0, t1
+
+2:
+	jal	plat_irq_dispatch
+
+	/* Restore sp */
+	move	sp, s1
+
+	j	ret_from_irq
 END(except_vec_vi_handler)

 /*