MIPS: Netlogic: Add cpu to node mapping for XLP9XX
XLP9XX has 20 cores per node, as opposed to 8 on the earlier XLP8XX.
Update the code that calculates the node id from the cpu id to handle
this.

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/6283/
parent e7aa6c66b0
commit 98d4884ca5
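The node mapping the patch introduces is plain integer division: each node owns a contiguous block of cpu ids sized cores-per-node * NLM_THREADS_PER_CORE, so node = cpuid / block. A minimal standalone sketch of that arithmetic, assuming the slot counts the patch installs (32 core-id slots per node on XLP9XX, even though only 20 cores are populated, and 8 on XLP8XX); the helper and names below are illustrative, not kernel code:

#include <stdio.h>

#define THREADS_PER_CORE 4	/* NLM_THREADS_PER_CORE in the patch */

/* Illustrative stand-in for nlm_cpuid_to_node(): the node id is the
 * cpu id divided by the per-node block of cpu ids. */
static unsigned int cpuid_to_node(unsigned int cpuid,
				  unsigned int cores_per_node)
{
	unsigned int threads_per_node = cores_per_node * THREADS_PER_CORE;

	return cpuid / threads_per_node;
}

int main(void)
{
	/* XLP9XX: 32 core slots/node -> 128 cpu ids/node, 130/128 = 1 */
	printf("XLP9XX cpu 130 -> node %u\n", cpuid_to_node(130, 32));
	/* XLP8XX: 8 cores/node -> 32 cpu ids/node, 130/32 = 4 */
	printf("XLP8XX cpu 130 -> node %u\n", cpuid_to_node(130, 8));
	return 0;
}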
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -47,9 +47,16 @@
 #endif
 #endif
 
-#define NLM_CORES_PER_NODE	8
 #define NLM_THREADS_PER_CORE	4
-#define NLM_CPUS_PER_NODE	(NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)
+#ifdef CONFIG_CPU_XLR
+#define nlm_cores_per_node()	8
+#else
+extern unsigned int xlp_cores_per_node;
+#define nlm_cores_per_node()	xlp_cores_per_node
+#endif
+
+#define nlm_threads_per_node()	(nlm_cores_per_node() * NLM_THREADS_PER_CORE)
+#define nlm_cpuid_to_node(c)	((c) / nlm_threads_per_node())
 
 struct nlm_soc_info {
 	unsigned long coremask; /* cores enabled on the soc */
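One design note on the hunk above, as I read it: under CONFIG_CPU_XLR the macro nlm_cores_per_node() stays a literal 8, so nlm_threads_per_node() and the division in nlm_cpuid_to_node() still fold to compile-time constants on XLR; only XLP pays for the indirection through the xlp_cores_per_node variable, which prom_init() (further down in this patch) fills in before any of the derived macros are used.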
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -146,6 +146,11 @@ static inline int hard_smp_processor_id(void)
 
 static inline int nlm_nodeid(void)
 {
-	return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
+	uint32_t prid = read_c0_prid();
+
+	if ((prid & 0xff00) == PRID_IMP_NETLOGIC_XLP9XX)
+		return (__read_32bit_c0_register($15, 1) >> 7) & 0x7;
+	else
+		return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
 }
 
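The two shifts above imply a cpu-number layout in coprocessor-0 register $15 select 1 (EBase): on XLP9XX the node id appears to sit in bits 7..9, leaving bits 2..6 for up to 32 core slots and bits 0..1 for the hw thread, while pre-9XX parts keep a 2-bit node in bits 5..6. A hedged decoding sketch of the XLP9XX case; the struct and field names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the cpu number read from $15 select 1.
 * Field positions follow the shifts in nlm_nodeid(); the struct and
 * its names are not kernel identifiers. */
struct xlp9xx_cpuid {
	unsigned int thread;	/* bits 0..1: 4 hw threads per core */
	unsigned int core;	/* bits 2..6: 32 core slots per node */
	unsigned int node;	/* bits 7..9: up to 8 nodes */
};

static struct xlp9xx_cpuid decode_xlp9xx(uint32_t ebase)
{
	struct xlp9xx_cpuid id = {
		.thread = ebase & 0x3,
		.core   = (ebase >> 2) & 0x1f,
		.node   = (ebase >> 7) & 0x7,	/* same as nlm_nodeid() */
	};
	return id;
}

int main(void)
{
	struct xlp9xx_cpuid id = decode_xlp9xx(0x9a);	/* 0b10011010 */

	/* prints "node 1 core 6 thread 2" */
	printf("node %u core %u thread %u\n", id.node, id.core, id.thread);
	return 0;
}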
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -223,7 +223,7 @@ static void nlm_init_node_irqs(int node)
 			continue;
 
 		nlm_pic_init_irt(nodep->picbase, irt, i,
-			node * NLM_CPUS_PER_NODE, 0);
+			node * nlm_threads_per_node(), 0);
 		nlm_setup_pic_irq(node, i, i, irt);
 	}
 }
@@ -232,8 +232,8 @@ void nlm_smp_irq_init(int hwcpuid)
 {
 	int node, cpu;
 
-	node = hwcpuid / NLM_CPUS_PER_NODE;
-	cpu = hwcpuid % NLM_CPUS_PER_NODE;
+	node = nlm_cpuid_to_node(hwcpuid);
+	cpu = hwcpuid % nlm_threads_per_node();
 
 	if (cpu == 0 && node != 0)
 		nlm_init_node_irqs(node);
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -63,7 +63,7 @@ void nlm_send_ipi_single(int logical_cpu, unsigned int action)
 	uint64_t picbase;
 
 	cpu = cpu_logical_map(logical_cpu);
-	node = cpu / NLM_CPUS_PER_NODE;
+	node = nlm_cpuid_to_node(cpu);
 	picbase = nlm_get_node(node)->picbase;
 
 	if (action & SMP_CALL_FUNCTION)
@@ -152,7 +152,7 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 	int cpu, node;
 
 	cpu = cpu_logical_map(logical_cpu);
-	node = cpu / NLM_CPUS_PER_NODE;
+	node = nlm_cpuid_to_node(logical_cpu);
 	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
 	nlm_next_gp = (unsigned long)task_thread_info(idle);
 
@@ -164,7 +164,7 @@ void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 void __init nlm_smp_setup(void)
 {
 	unsigned int boot_cpu;
-	int num_cpus, i, ncore;
+	int num_cpus, i, ncore, node;
 	volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
 	char buf[64];
 
@@ -187,6 +187,8 @@ void __init nlm_smp_setup(void)
 			__cpu_number_map[i] = num_cpus;
 			__cpu_logical_map[num_cpus] = i;
 			set_cpu_possible(num_cpus, true);
+			node = nlm_cpuid_to_node(i);
+			cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
 			++num_cpus;
 		}
 	}
|
@ -51,6 +51,7 @@ uint64_t nlm_io_base;
|
||||
struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
|
||||
cpumask_t nlm_cpumask = CPU_MASK_CPU0;
|
||||
unsigned int nlm_threads_per_core;
|
||||
unsigned int xlp_cores_per_node;
|
||||
|
||||
static void nlm_linux_exit(void)
|
||||
{
|
||||
@@ -154,6 +155,10 @@ void __init prom_init(void)
 	void *reset_vec;
 
 	nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
+	if (cpu_is_xlp9xx())
+		xlp_cores_per_node = 32;
+	else
+		xlp_cores_per_node = 8;
 	nlm_init_boot_cpu();
 	xlp_mmu_init();
 	nlm_node_init(0);
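Worth noting against the commit log: the XLP9XX value here is 32, not the 20 cores the log cites. The divisor has to span the architected cpu-id space of a node, and with the 3-bit node field at bit 7 (see nlm_nodeid() above) that space is 32 core slots * 4 threads = 128 cpu ids per node, regardless of how many of the 20 physical cores are actually fused in; at least, that is the reading the two hunks together suggest.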
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -165,7 +165,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 	nodep->coremask = 1;
 
 	pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
-	for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+	for (core = 0; core < nlm_cores_per_node(); core++) {
 		/* we will be on node 0 core 0 */
 		if (n == 0 && core == 0)
 			continue;
@@ -175,7 +175,7 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 			continue;
 
 		/* see if at least the first hw thread is enabled */
-		cpu = (n * NLM_CORES_PER_NODE + core)
+		cpu = (n * nlm_cores_per_node() + core)
 				* NLM_THREADS_PER_CORE;
 		if (!cpumask_test_cpu(cpu, wakeup_mask))
 			continue;
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -70,7 +70,7 @@ int xlr_wakeup_secondary_cpus(void)
 
 	/* Fill up the coremask early */
 	nodep->coremask = 1;
-	for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+	for (i = 1; i < nlm_cores_per_node(); i++) {
 		for (j = 1000000; j > 0; j--) {
 			if (cpu_ready[i * NLM_THREADS_PER_CORE])
 				break;
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -280,7 +280,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
 		irt = PIC_IRT_PCIE_LINK_INDEX(link);
 		nlm_setup_pic_irq(node, lirq, lirq, irt);
 		nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
-				node * NLM_CPUS_PER_NODE, 1 /*en */);
+				node * nlm_threads_per_node(), 1 /*en */);
 	}
 
 	/* allocate a MSI vec, and tell the bridge about it */
@@ -443,7 +443,7 @@ void __init xlp_init_node_msi_irqs(int node, int link)
 		msixvec = link * XLP_MSIXVEC_PER_LINK + i;
 		irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec);
 		nlm_pic_init_irt(nodep->picbase, irt, PIC_PCIE_MSIX_IRQ(link),
-			node * NLM_CPUS_PER_NODE, 1 /* enable */);
+			node * nlm_threads_per_node(), 1 /* enable */);
 
 		/* Initialize MSI-X extended irq space for the link */
 		irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));