commit 63a87fe0d0
octeon_cpu_disable() will unconditionally enable interrupts when called. We
can assume that the routine is always called with interrupts disabled, so
just delete the incorrect local_irq_disable/enable().

The patch fixes the following crash when offlining a CPU:

[ 93.818785] ------------[ cut here ]------------
[ 93.823421] WARNING: CPU: 1 PID: 10 at kernel/smp.c:231 flush_smp_call_function_queue+0x1c4/0x1d0()
[ 93.836215] Modules linked in:
[ 93.839287] CPU: 1 PID: 10 Comm: migration/1 Not tainted 3.19.0-rc4-octeon-los_b5f0 #1
[ 93.847212] Stack : 0000000000000001 ffffffff81b2cf90 0000000000000004 ffffffff81630000
             0000000000000000 0000000000000000 0000000000000000 000000000000004a
             0000000000000006 ffffffff8117e550 0000000000000000 0000000000000000
             ffffffff81b30000 ffffffff81b26808 8000000032c77748 ffffffff81627e07
             ffffffff81595ec8 ffffffff81b26808 000000000000000a 0000000000000001
             0000000000000001 0000000000000003 0000000010008ce1 ffffffff815030c8
             8000000032cbbb38 ffffffff8113d42c 0000000010008ce1 ffffffff8117f36c
             8000000032c77300 8000000032cbba50 0000000000000001 ffffffff81503984
             0000000000000000 0000000000000000 0000000000000000 0000000000000000
             0000000000000000 ffffffff81121668 0000000000000000 0000000000000000
             ...
[ 93.912819] Call Trace:
[ 93.915273] [<ffffffff81121668>] show_stack+0x68/0x80
[ 93.920335] [<ffffffff81503984>] dump_stack+0x6c/0x90
[ 93.925395] [<ffffffff8113d58c>] warn_slowpath_common+0x94/0xd8
[ 93.931324] [<ffffffff811a402c>] flush_smp_call_function_queue+0x1c4/0x1d0
[ 93.938208] [<ffffffff811a4128>] hotplug_cfd+0xf0/0x108
[ 93.943444] [<ffffffff8115bacc>] notifier_call_chain+0x5c/0xb8
[ 93.949286] [<ffffffff8113d704>] cpu_notify+0x24/0x60
[ 93.954348] [<ffffffff81501738>] take_cpu_down+0x38/0x58
[ 93.959670] [<ffffffff811b343c>] multi_cpu_stop+0x154/0x180
[ 93.965250] [<ffffffff811b3768>] cpu_stopper_thread+0xd8/0x160
[ 93.971093] [<ffffffff8115ea4c>] smpboot_thread_fn+0x1ec/0x1f8
[ 93.976936] [<ffffffff8115ab04>] kthread+0xd4/0xf0
[ 93.981735] [<ffffffff8111c4f0>] ret_from_kernel_thread+0x14/0x1c
[ 93.987835]
[ 93.989326] ---[ end trace c9e3815ee655bda9 ]---
[ 93.993951] Kernel bug detected[#1]:
[ 93.997533] CPU: 1 PID: 10 Comm: migration/1 Tainted: G W 3.19.0-rc4-octeon-los_b5f0 #1
[ 94.006591] task: 8000000032c77300 ti: 8000000032cb8000 task.ti: 8000000032cb8000
[ 94.014081] $ 0 : 0000000000000000 0000000010000ce1 0000000000000001 ffffffff81620000
[ 94.022146] $ 4 : 8000000002c72ac0 0000000000000000 00000000000001a7 ffffffff813b06f0
[ 94.030210] $ 8 : ffffffff813b20d8 0000000000000000 0000000000000000 ffffffff81630000
[ 94.038275] $12 : 0000000000000087 0000000000000000 0000000000000086 0000000000000000
[ 94.046339] $16 : ffffffff81623168 0000000000000001 0000000000000000 0000000000000008
[ 94.054405] $20 : 0000000000000001 0000000000000001 0000000000000001 0000000000000003
[ 94.062470] $24 : 0000000000000038 ffffffff813b7f10
[ 94.070536] $28 : 8000000032cb8000 8000000032cbbc20 0000000010008ce1 ffffffff811bcaf4
[ 94.078601] Hi : 0000000000f188e8
[ 94.082179] Lo : d4fdf3b646c09d55
[ 94.085760] epc : ffffffff811bc9d0 irq_work_run_list+0x8/0xf8
[ 94.091686] Tainted: G W
[ 94.095613] ra : ffffffff811bcaf4 irq_work_run+0x34/0x60
[ 94.101192] Status: 10000ce3 KX SX UX KERNEL EXL IE
[ 94.106235] Cause : 40808034
[ 94.109119] PrId : 000d9301 (Cavium Octeon II)
[ 94.113653] Modules linked in:
[ 94.116721] Process migration/1 (pid: 10, threadinfo=8000000032cb8000, task=8000000032c77300, tls=0000000000000000)
[ 94.127168] Stack : 8000000002c74c80 ffffffff811a4128 0000000000000001 ffffffff81635720
             fffffffffffffff2 ffffffff8115bacc 80000000320fbce0 80000000320fbca4
             80000000320fbc80 0000000000000002 0000000000000004 ffffffff8113d704
             80000000320fbce0 ffffffff81501738 0000000000000003 ffffffff811b343c
             8000000002c72aa0 8000000002c72aa8 ffffffff8159cae8 ffffffff8159caa0
             ffffffff81650000 80000000320fbbf0 80000000320fbc80 ffffffff811b32e8
             0000000000000000 ffffffff811b3768 ffffffff81622b80 ffffffff815148a8
             8000000032c77300 8000000002c73e80 ffffffff815148a8 8000000032c77300
             ffffffff81622b80 ffffffff815148a8 8000000032c77300 ffffffff81503f48
             ffffffff8115ea0c ffffffff81620000 0000000000000000 ffffffff81174d64
             ...
[ 94.192771] Call Trace:
[ 94.195222] [<ffffffff811bc9d0>] irq_work_run_list+0x8/0xf8
[ 94.200802] [<ffffffff811bcaf4>] irq_work_run+0x34/0x60
[ 94.206036] [<ffffffff811a4128>] hotplug_cfd+0xf0/0x108
[ 94.211269] [<ffffffff8115bacc>] notifier_call_chain+0x5c/0xb8
[ 94.217111] [<ffffffff8113d704>] cpu_notify+0x24/0x60
[ 94.222171] [<ffffffff81501738>] take_cpu_down+0x38/0x58
[ 94.227491] [<ffffffff811b343c>] multi_cpu_stop+0x154/0x180
[ 94.233072] [<ffffffff811b3768>] cpu_stopper_thread+0xd8/0x160
[ 94.238914] [<ffffffff8115ea4c>] smpboot_thread_fn+0x1ec/0x1f8
[ 94.244757] [<ffffffff8115ab04>] kthread+0xd4/0xf0
[ 94.249555] [<ffffffff8111c4f0>] ret_from_kernel_thread+0x14/0x1c
[ 94.255654]
[ 94.257146] Code: a2423c40 40026000 30420001 <00020336> dc820000 10400037 00000000 0000010f 0000010f
[ 94.267183] ---[ end trace c9e3815ee655bdaa ]---
[ 94.271804] Fatal exception: panic in 5 seconds

Reported-by: Hemmo Nieminen <hemmo.nieminen@iki.fi>
Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Acked-by: David Daney <david.daney@cavium.com>
Cc: stable@vger.kernel.org # v3.18+
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/8952/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
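For context, the fix described above amounts to dropping the local_irq_disable()/local_irq_enable() pair from octeon_cpu_disable(): the function runs from the stop-machine path (take_cpu_down -> multi_cpu_stop, visible in the trace) with interrupts already disabled, and briefly re-enabling them on a CPU that has just been marked offline lets pending IPI/irq_work run there, which is what the WARNING and the subsequent irq_work_run_list() BUG show. A minimal sketch of the change is given below; the exact placement of the removed calls (around octeon_fixup_irqs()) is an assumption based on the post-patch code in this file, not a quote from the original patch:

    @@ static int octeon_cpu_disable(void) @@
     	set_cpu_online(cpu, false);
     	cpu_clear(cpu, cpu_callin_map);
    -	local_irq_disable();
     	octeon_fixup_irqs();
    -	local_irq_enable();
     
     	flush_cache_all();
     	local_flush_tlb_all();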
391 lines
9.2 KiB
C
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
        const int coreid = cvmx_get_core_num();
        uint64_t action;

        /* Load the mailbox register to figure out what we're supposed to do */
        action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

        /* Clear the mailbox to clear the interrupt */
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

        if (action & SMP_CALL_FUNCTION)
                smp_call_function_interrupt();
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();

        /* Check if we've been told to flush the icache */
        if (action & SMP_ICACHE_FLUSH)
                asm volatile ("synci 0($0)\n");
        return IRQ_HANDLED;
}

/**
 * Cause the function described by call_data to be executed on the passed
 * cpu. When the function has finished, increment the finished field of
 * call_data.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
        int coreid = cpu_logical_map(cpu);
        /*
        pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
                coreid, action);
        */
        cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

static inline void octeon_send_ipi_mask(const struct cpumask *mask,
                                        unsigned int action)
{
        unsigned int i;

        for_each_cpu_mask(i, *mask)
                octeon_send_ipi_single(i, action);
}

/**
 * Detect available CPUs, populate cpu_possible_mask
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        struct linux_app_boot_info *labi;

        if (!setup_max_cpus)
                return;

        labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
        if (labi->labi_signature != LABI_SIGNATURE) {
                pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
                return;
        }

        octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

static void octeon_smp_setup(void)
{
        const int coreid = cvmx_get_core_num();
        int cpus;
        int id;
        int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int num_cores = cvmx_octeon_num_cores();
#endif

        /* The present CPUs are initially just the boot cpu (CPU 0). */
        for (id = 0; id < NR_CPUS; id++) {
                set_cpu_possible(id, id == 0);
                set_cpu_present(id, id == 0);
        }

        __cpu_number_map[coreid] = 0;
        __cpu_logical_map[0] = coreid;

        /* The present CPUs get the lowest CPU numbers. */
        cpus = 1;
        for (id = 0; id < NR_CPUS; id++) {
                if ((id != coreid) && (core_mask & (1 << id))) {
                        set_cpu_possible(cpus, true);
                        set_cpu_present(cpus, true);
                        __cpu_number_map[id] = cpus;
                        __cpu_logical_map[cpus] = id;
                        cpus++;
                }
        }

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * The possible CPUs are all those present on the chip. We
         * will assign CPU numbers for possible cores as well. Cores
         * are always consecutively numbered from 0.
         */
        for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
                     id < num_cores && id < NR_CPUS; id++) {
                if (!(core_mask & (1 << id))) {
                        set_cpu_possible(cpus, true);
                        __cpu_number_map[id] = cpus;
                        __cpu_logical_map[cpus] = id;
                        cpus++;
                }
        }
#endif

        octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook
 *
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
        int count;

        pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
                cpu_logical_map(cpu));

        octeon_processor_sp = __KSTK_TOS(idle);
        octeon_processor_gp = (unsigned long)(task_thread_info(idle));
        octeon_processor_boot = cpu_logical_map(cpu);
        mb();

        count = 10000;
        while (octeon_processor_sp && count) {
                /* Waiting for processor to get the SP and GP */
                udelay(1);
                count--;
        }
        if (count == 0)
                pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
        unsigned int sr;

        sr = set_c0_status(ST0_BEV);
        write_c0_ebase((u32)ebase);
        write_c0_status(sr);

        octeon_check_cpu_bist();
        octeon_init_cvmcount();

        octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init
 *
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
        /*
         * Only the low order mailbox bits are used for IPIs, leave
         * the other bits alone.
         */
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
        if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
                        IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
                        mailbox_interrupt)) {
                panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
        }
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
        octeon_user_io_init();

        /* to generate the first CPU timer interrupt */
        write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

static int octeon_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == 0)
                return -EBUSY;

        if (!octeon_bootloader_entry_addr)
                return -ENOTSUPP;

        set_cpu_online(cpu, false);
        cpu_clear(cpu, cpu_callin_map);
        octeon_fixup_irqs();

        flush_cache_all();
        local_flush_tlb_all();

        return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
        int coreid = cpu_logical_map(cpu);
        uint32_t mask, new_mask;
        const struct cvmx_bootmem_named_block_desc *block_desc;

        while (per_cpu(cpu_state, cpu) != CPU_DEAD)
                cpu_relax();

        /*
         * This is a somewhat convoluted way of getting/setting the
         * available coremask, copied from the bootloader.
         */

        mask = 1 << coreid;
        /* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
        block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

        if (!block_desc) {
                struct linux_app_boot_info *labi;

                labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

                labi->avail_coremask |= mask;
                new_mask = labi->avail_coremask;
        } else {                /* alternative, already initialized */
                uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
                                AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
                *p |= mask;
                new_mask = *p;
        }

        pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
        mb();
        cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
        cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

void play_dead(void)
{
        int cpu = cpu_number_map(cvmx_get_core_num());

        idle_task_exit();
        octeon_processor_boot = 0xff;
        per_cpu(cpu_state, cpu) = CPU_DEAD;

        mb();

        while (1)       /* core will be reset here */
                ;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
        kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
}

static int octeon_update_boot_vector(unsigned int cpu)
{

        int coreid = cpu_logical_map(cpu);
        uint32_t avail_coremask;
        const struct cvmx_bootmem_named_block_desc *block_desc;
        struct boot_init_vector *boot_vect =
                (struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

        block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

        if (!block_desc) {
                struct linux_app_boot_info *labi;

                labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

                avail_coremask = labi->avail_coremask;
                labi->avail_coremask &= ~(1 << coreid);
        } else {                /* alternative, already initialized */
                avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
                        block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
        }

        if (!(avail_coremask & (1 << coreid))) {
                /* Core not available; assume it was caught by the simple executive. */
                cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
                cvmx_write_csr(CVMX_CIU_PP_RST, 0);
        }

        boot_vect[coreid].app_start_func_addr =
                (uint32_t) (unsigned long) start_after_reset;
        boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

        mb();

        cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

        return 0;
}

static int octeon_cpu_callback(struct notifier_block *nfb,
        unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                octeon_update_boot_vector(cpu);
                break;
        case CPU_ONLINE:
                pr_info("Cpu %d online\n", cpu);
                break;
        case CPU_DEAD:
                break;
        }

        return NOTIFY_OK;
}

static int register_cavium_notifier(void)
{
        hotcpu_notifier(octeon_cpu_callback, 0);
        return 0;
}
late_initcall(register_cavium_notifier);

#endif  /* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops octeon_smp_ops = {
        .send_ipi_single        = octeon_send_ipi_single,
        .send_ipi_mask          = octeon_send_ipi_mask,
        .init_secondary         = octeon_init_secondary,
        .smp_finish             = octeon_smp_finish,
        .boot_secondary         = octeon_boot_secondary,
        .smp_setup              = octeon_smp_setup,
        .prepare_cpus           = octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable            = octeon_cpu_disable,
        .cpu_die                = octeon_cpu_die,
#endif
};