Merge tag 'irq-core-2024-09-16' into loongarch-next

LoongArch architecture changes for 6.12 depend on the irq core
changes for the AVEC irqchip, so merge them to create a base and
avoid conflicts.
commit 987cbafe62
Author: Huacai Chen
Date:   2024-09-17 22:20:12 +08:00

58 changed files with 1268 additions and 683 deletions

@@ -31,13 +31,25 @@ description: |
This device also represents the FIQ interrupt sources on platforms using AIC,
which do not go through a discrete interrupt controller.
IPIs may be performed via MMIO registers on all variants of AIC. Starting
from A11, system registers may also be used for "fast" IPIs. Starting from
M1, even faster IPIs within the same cluster may be achieved by writing to
a "local" fast IPI register as opposed to using the "global" fast IPI
register.
allOf:
- $ref: /schemas/interrupt-controller.yaml#
properties:
compatible:
items:
- const: apple,t8103-aic
- enum:
- apple,s5l8960x-aic
- apple,t7000-aic
- apple,s8000-aic
- apple,t8010-aic
- apple,t8015-aic
- apple,t8103-aic
- const: apple,aic
interrupt-controller: true

@@ -85,6 +85,7 @@ config LOONGARCH
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
select GENERIC_IRQ_MATRIX_ALLOCATOR
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW

@@ -65,5 +65,6 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#endif /* __ASM_CPU_FEATURES_H */

@@ -99,6 +99,7 @@ enum cpu_type_enum {
#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -127,5 +128,6 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#endif /* _ASM_CPU_H */

@@ -12,12 +12,13 @@
extern void ack_bad_irq(unsigned int irq);
#define ack_bad_irq ack_bad_irq
#define NR_IPI 3
#define NR_IPI 4
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNCTION,
IPI_IRQ_WORK,
IPI_CLEAR_VECTOR,
};
typedef struct {

@@ -39,11 +39,22 @@ void spurious_interrupt(void);
#define NR_IRQS_LEGACY 16
/*
* 256 Vectors Mapping for AVECINTC:
*
* 0 - 15: Mapping classic IPs, e.g. IP0-12.
* 16 - 255: Mapping vectors for external IRQ.
*
*/
#define NR_VECTORS 256
#define NR_LEGACY_VECTORS 16
#define IRQ_MATRIX_BITS NR_VECTORS
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu);
#define MAX_IO_PICS 2
#define NR_IRQS (64 + (256 * MAX_IO_PICS))
#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS))
struct acpi_vector_group {
int node;
@@ -65,7 +76,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15)
#define LOONGSON_CPU_IRQ_BASE 16
#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14)
#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15)
#define LOONGSON_PCH_IRQ_BASE 64
#define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47)
@@ -88,20 +99,8 @@ struct acpi_madt_bio_pic;
struct acpi_madt_msi_pic;
struct acpi_madt_lpc_pic;
int liointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lio_pic *acpi_liointc);
int eiointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_eio_pic *acpi_eiointc);
void complete_irq_moving(void);
int htvec_acpi_init(struct irq_domain *parent,
struct acpi_madt_ht_pic *acpi_htvec);
int pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc);
int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi);
int pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic);
int find_pch_pic(u32 gsi);
struct fwnode_handle *get_pch_msi_handle(int pci_segment);
extern struct acpi_madt_lio_pic *acpi_liointc;

@@ -253,8 +253,8 @@
#define CSR_ESTAT_EXC_WIDTH 6
#define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT)
#define CSR_ESTAT_IS_SHIFT 0
#define CSR_ESTAT_IS_WIDTH 14
#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT)
#define CSR_ESTAT_IS_WIDTH 15
#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << CSR_ESTAT_IS_SHIFT)
#define LOONGARCH_CSR_ERA 0x6 /* ERA */
@@ -649,6 +649,13 @@
#define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */
#define LOONGARCH_CSR_ISR0 0xa0
#define LOONGARCH_CSR_ISR1 0xa1
#define LOONGARCH_CSR_ISR2 0xa2
#define LOONGARCH_CSR_ISR3 0xa3
#define LOONGARCH_CSR_IRR 0xa4
#define LOONGARCH_CSR_PRID 0xc0
/* Shadow MCSR : 0xc0 ~ 0xff */
@@ -1011,7 +1018,7 @@
/*
* CSR_ECFG IM
*/
#define ECFG0_IM 0x00001fff
#define ECFG0_IM 0x00005fff
#define ECFGB_SIP0 0
#define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0)
#define ECFGB_SIP1 1
@@ -1054,6 +1061,7 @@
#define IOCSRF_EIODECODE BIT_ULL(9)
#define IOCSRF_FLATMODE BIT_ULL(10)
#define IOCSRF_VM BIT_ULL(11)
#define IOCSRF_AVEC BIT_ULL(15)
#define LOONGARCH_IOCSR_VENDOR 0x10
@@ -1065,6 +1073,7 @@
#define IOCSR_MISC_FUNC_SOFT_INT BIT_ULL(10)
#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51)
#define LOONGARCH_IOCSR_CPUTEMP 0x428
@@ -1387,9 +1396,10 @@ __BUILD_CSR_OP(tlbidx)
#define INT_TI 11 /* Timer */
#define INT_IPI 12
#define INT_NMI 13
#define INT_AVEC 14
/* ExcCodes corresponding to interrupts */
#define EXCCODE_INT_NUM (INT_NMI + 1)
#define EXCCODE_INT_NUM (INT_AVEC + 1)
#define EXCCODE_INT_START 64
#define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1)
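
The ESTAT.IS widening above is a direct consequence of the new interrupt line: INT_AVEC sits at bit 14, growing the field from 14 to 15 bits. Bit 13 (INT_NMI) is not routable through ECFG, which explains the hole in the new interrupt mask. A quick check of the value (illustrative macros, not from the source):

#define OLD_ECFG0_IM	0x00001fffUL			/* IP0-IP12 */
#define NEW_ECFG0_IM	(OLD_ECFG0_IM | (1UL << 14))	/* adds AVEC: 0x5fff, bit 13 (NMI) stays clear */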

@@ -70,10 +70,12 @@ extern int __cpu_logical_map[NR_CPUS];
#define ACTION_RESCHEDULE 1
#define ACTION_CALL_FUNCTION 2
#define ACTION_IRQ_WORK 3
#define ACTION_CLEAR_VECTOR 4
#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU)
#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE)
#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION)
#define SMP_IRQ_WORK BIT(ACTION_IRQ_WORK)
#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR)
struct secondary_data {
unsigned long stack;

@@ -106,7 +106,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
elf_hwcap |= HWCAP_LOONGARCH_CRC32;
}
config = read_cpucfg(LOONGARCH_CPUCFG2);
if (config & CPUCFG2_LAM) {
c->options |= LOONGARCH_CPU_LAM;
@@ -174,6 +173,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_FLATMODE;
if (config & IOCSRF_EIODECODE)
c->options |= LOONGARCH_CPU_EIODECODE;
if (config & IOCSRF_AVEC)
c->options |= LOONGARCH_CPU_AVECINT;
if (config & IOCSRF_VM)
c->options |= LOONGARCH_CPU_HYPERVISOR;

@@ -87,6 +87,18 @@ static void __init init_vec_parent_group(void)
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
int __init arch_probe_nr_irqs(void)
{
int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS);
if (!cpu_has_avecint)
nr_irqs = (64 + NR_VECTORS * nr_io_pics);
else
nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
return NR_IRQS_LEGACY;
}
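
With AVECINTC enabled, every CPU owns a private 256-entry vector space, so the descriptor estimate must scale with the CPU count instead of only the I/O PIC count. A worked example with made-up machine parameters:

/* illustrative: 16 possible CPUs, 2 I/O interrupt controllers */
nr_irqs = 64 + 256 * (16 + 2);	/* cpu_has_avecint: 4672 */
nr_irqs = 64 + 256 * 2;		/* legacy EIOINTC only: 576 */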
void __init init_IRQ(void)
{
int i;

@@ -134,6 +134,11 @@ static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
info->ipi_irqs[IPI_IRQ_WORK]++;
}
if (action & SMP_CLEAR_VECTOR) {
complete_irq_moving();
info->ipi_irqs[IPI_CLEAR_VECTOR]++;
}
return IRQ_HANDLED;
}

@@ -72,6 +72,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
[IPI_CLEAR_VECTOR] = "Clear vector interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -248,6 +249,11 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
}
if (action & SMP_CLEAR_VECTOR) {
complete_irq_moving();
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++;
}
return IRQ_HANDLED;
}

@@ -1128,7 +1128,7 @@ static void blk_complete_reqs(struct llist_head *list)
rq->q->mq_ops->complete(rq);
}
static __latent_entropy void blk_done_softirq(struct softirq_action *h)
static __latent_entropy void blk_done_softirq(void)
{
blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
}

@@ -685,6 +685,7 @@ config LOONGSON_PCH_MSI
depends on PCI
default MACH_LOONGSON64
select IRQ_DOMAIN_HIERARCHY
select IRQ_MSI_LIB
select PCI_MSI
help
Support for the Loongson PCH MSI Controller.

@@ -110,7 +110,7 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o
obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o
obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o
obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o
obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o
obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o

@@ -234,7 +234,10 @@ enum fiq_hwirq {
AIC_NR_FIQ
};
/* True if UNCORE/UNCORE2 and Sn_... IPI registers are present and used (A11+) */
static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
/* True if SYS_IMP_APL_IPI_RR_LOCAL_EL1 exists for local fast IPIs (M1+) */
static DEFINE_STATIC_KEY_TRUE(use_local_fast_ipi);
struct aic_info {
int version;
@@ -252,6 +255,7 @@ struct aic_info {
/* Features */
bool fast_ipi;
bool local_fast_ipi;
};
static const struct aic_info aic1_info __initconst = {
@@ -270,17 +274,32 @@ static const struct aic_info aic1_fipi_info __initconst = {
.fast_ipi = true,
};
static const struct aic_info aic1_local_fipi_info __initconst = {
.version = 1,
.event = AIC_EVENT,
.target_cpu = AIC_TARGET_CPU,
.fast_ipi = true,
.local_fast_ipi = true,
};
static const struct aic_info aic2_info __initconst = {
.version = 2,
.irq_cfg = AIC2_IRQ_CFG,
.fast_ipi = true,
.local_fast_ipi = true,
};
static const struct of_device_id aic_info_match[] = {
{
.compatible = "apple,t8103-aic",
.data = &aic1_local_fipi_info,
},
{
.compatible = "apple,t8015-aic",
.data = &aic1_fipi_info,
},
{
@@ -532,14 +551,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
* we check for everything here, even things we don't support yet.
*/
if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
if (static_branch_likely(&use_fast_ipi)) {
aic_handle_ipi(regs);
} else {
pr_err_ratelimited("Fast IPI fired. Acking.\n");
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
}
}
if (static_branch_likely(&use_fast_ipi) &&
(read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING))
aic_handle_ipi(regs);
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
generic_handle_domain_irq(aic_irqc->hw_domain,
@@ -574,8 +588,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(irq));
}
if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
(read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
if (static_branch_likely(&use_fast_ipi) &&
(FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
(read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
/* Same story with uncore PMCs */
pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
@@ -750,12 +765,12 @@ static void aic_ipi_send_fast(int cpu)
u64 cluster = MPIDR_CLUSTER(mpidr);
u64 idx = MPIDR_CPU(mpidr);
if (MPIDR_CLUSTER(my_mpidr) == cluster)
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
SYS_IMP_APL_IPI_RR_LOCAL_EL1);
else
if (static_branch_likely(&use_local_fast_ipi) && MPIDR_CLUSTER(my_mpidr) == cluster) {
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx), SYS_IMP_APL_IPI_RR_LOCAL_EL1);
} else {
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
}
isb();
}
@@ -811,7 +826,8 @@ static int aic_init_cpu(unsigned int cpu)
/* Mask all hard-wired per-CPU IRQ/FIQ sources */
/* Pending Fast IPI FIQs */
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
if (static_branch_likely(&use_fast_ipi))
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
/* Timer FIQs */
sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
@@ -832,8 +848,10 @@ static int aic_init_cpu(unsigned int cpu)
FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
/* Uncore PMC FIQ */
sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
if (static_branch_likely(&use_fast_ipi)) {
sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
}
/* Commit all of the above */
isb();
@@ -987,11 +1005,12 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p
off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
if (irqc->info.fast_ipi)
static_branch_enable(&use_fast_ipi);
else
if (!irqc->info.fast_ipi)
static_branch_disable(&use_fast_ipi);
if (!irqc->info.local_fast_ipi)
static_branch_disable(&use_local_fast_ipi);
irqc->info.die_stride = off - start_off;
irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),

File diff suppressed because it is too large

@@ -57,8 +57,7 @@
static struct irq_domain *aic_domain;
static asmlinkage void __exception_irq_entry
aic_handle(struct pt_regs *regs)
static void __exception_irq_entry aic_handle(struct pt_regs *regs)
{
struct irq_domain_chip_generic *dgc = aic_domain->gc;
struct irq_chip_generic *gc = dgc->gc[0];

@@ -67,8 +67,7 @@
static struct irq_domain *aic5_domain;
static asmlinkage void __exception_irq_entry
aic5_handle(struct pt_regs *regs)
static void __exception_irq_entry aic5_handle(struct pt_regs *regs)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
u32 irqnr;

@@ -69,7 +69,7 @@ static struct {
struct irq_domain_ops ops;
} *clps711x_intc;
static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
static void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
{
u32 irqstat;

@@ -116,8 +116,7 @@ static struct irq_chip davinci_cp_intc_irq_chip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
static asmlinkage void __exception_irq_entry
davinci_cp_intc_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry davinci_cp_intc_handle_irq(struct pt_regs *regs)
{
int gpir, irqnr, none;

@@ -125,7 +125,7 @@ static struct irq_chip ft010_irq_chip = {
/* Local static for the IRQ entry call */
static struct ft010_irq_data firq;
static asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *regs)
{
struct ft010_irq_data *f = &firq;
int irq;

@@ -930,7 +930,7 @@ static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
__gic_handle_nmi(irqnr, regs);
}
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
__gic_handle_irq_from_irqsoff(regs);

@@ -97,7 +97,7 @@ bool gic_cpuif_has_vsgi(void)
fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);
return fld >= 0x3;
return fld >= ID_AA64PFR0_EL1_GIC_V4P1;
}
#else
bool gic_cpuif_has_vsgi(void)

@@ -105,8 +105,7 @@ static void ixp4xx_irq_unmask(struct irq_data *d)
}
}
static asmlinkage void __exception_irq_entry
ixp4xx_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
{
struct ixp4xx_irq *ixi = &ixirq;
unsigned long status;

@@ -0,0 +1,425 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2024 Loongson Technologies, Inc.
*/
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
#include "irq-msi-lib.h"
#include "irq-loongson.h"
#define VECTORS_PER_REG 64
#define IRR_VECTOR_MASK 0xffUL
#define IRR_INVALID_MASK 0x80000000UL
#define AVEC_MSG_OFFSET 0x100000
#ifdef CONFIG_SMP
struct pending_list {
struct list_head head;
};
static struct cpumask intersect_mask;
static DEFINE_PER_CPU(struct pending_list, pending_list);
#endif
static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);
struct avecintc_chip {
raw_spinlock_t lock;
struct fwnode_handle *fwnode;
struct irq_domain *domain;
struct irq_matrix *vector_matrix;
phys_addr_t msi_base_addr;
};
static struct avecintc_chip loongarch_avec;
struct avecintc_data {
struct list_head entry;
unsigned int cpu;
unsigned int vec;
unsigned int prev_cpu;
unsigned int prev_vec;
unsigned int moving;
};
static inline void avecintc_ack_irq(struct irq_data *d)
{
}
static inline void avecintc_mask_irq(struct irq_data *d)
{
}
static inline void avecintc_unmask_irq(struct irq_data *d)
{
}
#ifdef CONFIG_SMP
static inline void pending_list_init(int cpu)
{
struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
INIT_LIST_HEAD(&plist->head);
}
static void avecintc_sync(struct avecintc_data *adata)
{
struct pending_list *plist;
if (cpu_online(adata->prev_cpu)) {
plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
list_add_tail(&adata->entry, &plist->head);
adata->moving = 1;
mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
}
}
static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
{
int cpu, ret, vector;
struct avecintc_data *adata;
scoped_guard(raw_spinlock, &loongarch_avec.lock) {
adata = irq_data_get_irq_chip_data(data);
if (adata->moving)
return -EBUSY;
if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
return 0;
cpumask_and(&intersect_mask, dest, cpu_online_mask);
ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
if (ret < 0)
return ret;
vector = ret;
adata->cpu = cpu;
adata->vec = vector;
per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
avecintc_sync(adata);
}
irq_data_update_effective_affinity(data, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
static int avecintc_cpu_online(unsigned int cpu)
{
if (!loongarch_avec.vector_matrix)
return 0;
guard(raw_spinlock)(&loongarch_avec.lock);
irq_matrix_online(loongarch_avec.vector_matrix);
pending_list_init(cpu);
return 0;
}
static int avecintc_cpu_offline(unsigned int cpu)
{
struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
if (!loongarch_avec.vector_matrix)
return 0;
guard(raw_spinlock)(&loongarch_avec.lock);
if (!list_empty(&plist->head))
pr_warn("CPU#%d vector is busy\n", cpu);
irq_matrix_offline(loongarch_avec.vector_matrix);
return 0;
}
void complete_irq_moving(void)
{
struct pending_list *plist = this_cpu_ptr(&pending_list);
struct avecintc_data *adata, *tdata;
int cpu, vector, bias;
uint64_t isr;
guard(raw_spinlock)(&loongarch_avec.lock);
list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
cpu = adata->prev_cpu;
vector = adata->prev_vec;
bias = vector / VECTORS_PER_REG;
switch (bias) {
case 0:
isr = csr_read64(LOONGARCH_CSR_ISR0);
break;
case 1:
isr = csr_read64(LOONGARCH_CSR_ISR1);
break;
case 2:
isr = csr_read64(LOONGARCH_CSR_ISR2);
break;
case 3:
isr = csr_read64(LOONGARCH_CSR_ISR3);
break;
}
if (isr & (1UL << (vector % VECTORS_PER_REG))) {
mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
continue;
}
list_del(&adata->entry);
irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
this_cpu_write(irq_map[vector], NULL);
adata->moving = 0;
adata->prev_cpu = adata->cpu;
adata->prev_vec = adata->vec;
}
}
#endif
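
complete_irq_moving() above is the receiving end of the ACTION_CLEAR_VECTOR IPI: it retires the old vector of a moved interrupt only once that vector is no longer in service. A sketch of the handshake with made-up CPU and vector numbers:

/*
 * Suppose an interrupt moves from (CPU0, vector 40) to (CPU1, vector 33):
 * 1. avecintc_set_affinity() allocates (CPU1, 33), sets adata->moving,
 *    queues adata on CPU0's pending_list and IPIs CPU0.
 * 2. CPU0 finds ISR bit 40 still set (the interrupt is in flight), so it
 *    re-sends ACTION_CLEAR_VECTOR to itself and retries later.
 * 3. Once the bit clears, (CPU0, 40) is freed in the matrix and
 *    prev_cpu/prev_vec are committed to the new cpu/vec.
 */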
static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
msg->address_hi = 0x0;
msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
| ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
msg->data = 0x0;
}
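
The composed message encodes both routing coordinates in the address, so a device's MSI write lands directly on a per-CPU vector. Reading the expression back, with an illustrative base address that is not from the source:

/* address_lo = msi_base_addr | (vector << 4) | (physical CPU id << 12) */
/* e.g. base 0x2ff00000, vector 0x21, CPU 3 -> 0x2ff03210 */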
static struct irq_chip avec_irq_controller = {
.name = "AVECINTC",
.irq_ack = avecintc_ack_irq,
.irq_mask = avecintc_mask_irq,
.irq_unmask = avecintc_unmask_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = avecintc_set_affinity,
#endif
.irq_compose_msi_msg = avecintc_compose_msi_msg,
};
static void avecintc_irq_dispatch(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_desc *d;
chained_irq_enter(chip, desc);
while (true) {
unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
if (vector & IRR_INVALID_MASK)
break;
vector &= IRR_VECTOR_MASK;
d = this_cpu_read(irq_map[vector]);
if (d) {
generic_handle_irq_desc(d);
} else {
spurious_interrupt();
pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
}
}
chained_irq_exit(chip, desc);
}
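
The loop structure implies that LOONGARCH_CSR_IRR behaves as a pop-on-read queue (an assumption drawn from this code, not a claim documented here): each read yields one pending vector, and the handler drains everything that arrived while interrupts were off.

unsigned long irr = csr_read64(LOONGARCH_CSR_IRR);
if (!(irr & IRR_INVALID_MASK))			/* bit 31 set would mean: queue empty */
	handle_vector(irr & IRR_VECTOR_MASK);	/* hypothetical helper; bits 0-7 = vector */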
static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
int cpu, ret;
guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
if (ret < 0)
return ret;
adata->prev_cpu = adata->cpu = cpu;
adata->prev_vec = adata->vec = ret;
per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
return 0;
}
static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
for (unsigned int i = 0; i < nr_irqs; i++) {
struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
int ret;
if (!adata)
return -ENOMEM;
ret = avecintc_alloc_vector(irqd, adata);
if (ret < 0) {
kfree(adata);
return ret;
}
irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
adata, handle_edge_irq, NULL, NULL);
irqd_set_single_target(irqd);
irqd_set_affinity_on_activate(irqd);
}
return 0;
}
static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);
#ifdef CONFIG_SMP
if (!adata->moving)
return;
per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
list_del_init(&adata->entry);
#endif
}
static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
for (unsigned int i = 0; i < nr_irqs; i++) {
struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
if (d) {
struct avecintc_data *adata = irq_data_get_irq_chip_data(d);
avecintc_free_vector(d, adata);
irq_domain_reset_irq_data(d);
kfree(adata);
}
}
}
static const struct irq_domain_ops avecintc_domain_ops = {
.alloc = avecintc_domain_alloc,
.free = avecintc_domain_free,
.select = msi_lib_irq_domain_select,
};
static int __init irq_matrix_init(void)
{
loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
if (!loongarch_avec.vector_matrix)
return -ENOMEM;
for (int i = 0; i < NR_LEGACY_VECTORS; i++)
irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);
irq_matrix_online(loongarch_avec.vector_matrix);
return 0;
}
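
The matrix tracks all 256 vectors per CPU and pins the first 16 as system vectors (they shadow the classic IP0-12 lines), so dynamic allocations always land in the 16-255 range. A usage sketch of the two allocator calls this driver relies on:

unsigned int cpu;
/* returns a free vector (>= NR_LEGACY_VECTORS) on some online CPU, or -ENOSPC */
int vec = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
if (vec >= 0)
	irq_matrix_free(loongarch_avec.vector_matrix, cpu, vec, false);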
static int __init avecintc_init(struct irq_domain *parent)
{
int ret, parent_irq;
unsigned long value;
raw_spin_lock_init(&loongarch_avec.lock);
loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
if (!loongarch_avec.fwnode) {
pr_err("Unable to allocate domain handle\n");
ret = -ENOMEM;
goto out;
}
loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
&avecintc_domain_ops, NULL);
if (!loongarch_avec.domain) {
pr_err("Unable to create IRQ domain\n");
ret = -ENOMEM;
goto out_free_handle;
}
parent_irq = irq_create_mapping(parent, INT_AVEC);
if (!parent_irq) {
pr_err("Failed to mapping hwirq\n");
ret = -EINVAL;
goto out_remove_domain;
}
ret = irq_matrix_init();
if (ret < 0) {
pr_err("Failed to init irq matrix\n");
goto out_remove_domain;
}
irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);
#ifdef CONFIG_SMP
pending_list_init(0);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
"irqchip/loongarch/avecintc:starting",
avecintc_cpu_online, avecintc_cpu_offline);
#endif
value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
value |= IOCSR_MISC_FUNC_AVEC_EN;
iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
return ret;
out_remove_domain:
irq_domain_remove(loongarch_avec.domain);
out_free_handle:
irq_domain_free_fwnode(loongarch_avec.fwnode);
out:
return ret;
}
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
return pch_msi_acpi_init_avec(loongarch_avec.domain);
}
static inline int __init acpi_cascade_irqdomain_init(void)
{
return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
}
int __init avecintc_acpi_init(struct irq_domain *parent)
{
int ret = avecintc_init(parent);
if (ret < 0) {
pr_err("Failed to init IRQ domain\n");
return ret;
}
ret = acpi_cascade_irqdomain_init();
if (ret < 0) {
pr_err("Failed to init cascade IRQ domain\n");
return ret;
}
return ret;
}
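
Initialization order matters here: the CPU interrupt controller domain comes first, avecintc_init() hangs the AVEC domain off the INT_AVEC parent line, and parsing the MADT MSI entry finally re-parents PCH MSI onto it. A compressed view of the call flow, as implied by this series:

/*
 * cpuintc_acpi_init()
 *   -> avecintc_acpi_init(parent)		if cpu_has_avecint
 *        -> avecintc_init()			map INT_AVEC, set up the matrix
 *        -> acpi_cascade_irqdomain_init()
 *             -> pch_msi_acpi_init_avec()	AVEC domain becomes the MSI parent
 */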

@@ -13,6 +13,8 @@
#include <asm/loongarch.h>
#include <asm/setup.h>
#include "irq-loongson.h"
static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
@@ -140,7 +142,10 @@ static int __init acpi_cascade_irqdomain_init(void)
if (r < 0)
return r;
return 0;
if (cpu_has_avecint)
r = avecintc_acpi_init(irq_domain);
return r;
}
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,

@@ -17,6 +17,8 @@
#include <linux/syscore_ops.h>
#include <asm/numa.h>
#include "irq-loongson.h"
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
#define EIOINTC_REG_ENABLE 0x1600
@@ -360,6 +362,9 @@ static int __init acpi_cascade_irqdomain_init(void)
if (r < 0)
return r;
if (cpu_has_avecint)
return 0;
r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
if (r < 0)
return r;
@@ -396,8 +401,8 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
if (nr_pics == 1) {
register_syscore_ops(&eiointc_syscore_ops);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
"irqchip/loongarch/intc:starting",
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
"irqchip/loongarch/eiointc:starting",
eiointc_router_init, NULL);
}

@@ -17,6 +17,8 @@
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include "irq-loongson.h"
/* Registers */
#define HTVEC_EN_OFF 0x20
#define HTVEC_MAX_PARENT_IRQ 8

@@ -22,6 +22,8 @@
#include <asm/loongson.h>
#endif
#include "irq-loongson.h"
#define LIOINTC_CHIP_IRQ 32
#define LIOINTC_NUM_PARENT 4
#define LIOINTC_NUM_CORES 4

@@ -15,6 +15,8 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include "irq-loongson.h"
/* Registers */
#define LPC_INT_CTL 0x00
#define LPC_INT_ENA 0x04

@@ -15,6 +15,9 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include "irq-msi-lib.h"
#include "irq-loongson.h"
static int nr_pics;
struct pch_msi_data {
@@ -27,26 +30,6 @@ struct pch_msi_data {
static struct fwnode_handle *pch_msi_handle[MAX_IO_PICS];
static void pch_msi_mask_msi_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void pch_msi_unmask_msi_irq(struct irq_data *d)
{
irq_chip_unmask_parent(d);
pci_msi_unmask_irq(d);
}
static struct irq_chip pch_msi_irq_chip = {
.name = "PCH PCI MSI",
.irq_mask = pch_msi_mask_msi_irq,
.irq_unmask = pch_msi_unmask_msi_irq,
.irq_ack = irq_chip_ack_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
};
static int pch_msi_allocate_hwirq(struct pch_msi_data *priv, int num_req)
{
int first;
@@ -85,12 +68,6 @@ static void pch_msi_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
static struct msi_domain_info pch_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
.chip = &pch_msi_irq_chip,
};
static struct irq_chip middle_irq_chip = {
.name = "PCH MSI",
.irq_mask = irq_chip_mask_parent,
@@ -155,13 +132,31 @@ static void pch_msi_middle_domain_free(struct irq_domain *domain,
static const struct irq_domain_ops pch_msi_middle_domain_ops = {
.alloc = pch_msi_middle_domain_alloc,
.free = pch_msi_middle_domain_free,
.select = msi_lib_irq_domain_select,
};
#define PCH_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
MSI_FLAG_USE_DEF_CHIP_OPS | \
MSI_FLAG_PCI_MSI_MASK_PARENT)
#define PCH_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
MSI_FLAG_PCI_MSIX | \
MSI_FLAG_MULTI_PCI_MSI)
static struct msi_parent_ops pch_msi_parent_ops = {
.required_flags = PCH_MSI_FLAGS_REQUIRED,
.supported_flags = PCH_MSI_FLAGS_SUPPORTED,
.bus_select_mask = MATCH_PCI_MSI,
.bus_select_token = DOMAIN_BUS_NEXUS,
.prefix = "PCH-",
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int pch_msi_init_domains(struct pch_msi_data *priv,
struct irq_domain *parent,
struct fwnode_handle *domain_handle)
{
struct irq_domain *middle_domain, *msi_domain;
struct irq_domain *middle_domain;
middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs,
domain_handle,
@@ -174,14 +169,8 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
msi_domain = pci_msi_create_irq_domain(domain_handle,
&pch_msi_domain_info,
middle_domain);
if (!msi_domain) {
pr_err("Failed to create PCI MSI domain\n");
irq_domain_remove(middle_domain);
return -ENOMEM;
}
middle_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
middle_domain->msi_parent_ops = &pch_msi_parent_ops;
return 0;
}
@@ -266,17 +255,17 @@ IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init);
#ifdef CONFIG_ACPI
struct fwnode_handle *get_pch_msi_handle(int pci_segment)
{
int i;
if (cpu_has_avecint)
return pch_msi_handle[0];
for (i = 0; i < MAX_IO_PICS; i++) {
for (int i = 0; i < MAX_IO_PICS; i++) {
if (msi_group[i].pci_segment == pci_segment)
return pch_msi_handle[i];
}
return NULL;
return pch_msi_handle[0];
}
int __init pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi)
int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi)
{
int ret;
struct fwnode_handle *domain_handle;
@@ -289,4 +278,18 @@ int __init pch_msi_acpi_init(struct irq_domain *parent,
return ret;
}
int __init pch_msi_acpi_init_avec(struct irq_domain *parent)
{
if (pch_msi_handle[0])
return 0;
pch_msi_handle[0] = parent->fwnode;
irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
parent->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
parent->msi_parent_ops = &pch_msi_parent_ops;
return 0;
}
#endif

@@ -17,6 +17,8 @@
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include "irq-loongson.h"
/* Registers */
#define PCH_PIC_MASK 0x20
#define PCH_PIC_HTMSI_EN 0x40

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H
#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H
int find_pch_pic(u32 gsi);
int liointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lio_pic *acpi_liointc);
int eiointc_acpi_init(struct irq_domain *parent,
struct acpi_madt_eio_pic *acpi_eiointc);
int avecintc_acpi_init(struct irq_domain *parent);
int htvec_acpi_init(struct irq_domain *parent,
struct acpi_madt_ht_pic *acpi_htvec);
int pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc);
int pch_pic_acpi_init(struct irq_domain *parent,
struct acpi_madt_bio_pic *acpi_pchpic);
int pch_msi_acpi_init(struct irq_domain *parent,
struct acpi_madt_msi_pic *acpi_pchmsi);
int pch_msi_acpi_init_avec(struct irq_domain *parent);
#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */

@@ -234,37 +234,27 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
struct platform_device *child;
struct device_node *np;
u32 num_pins;
int ret = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
for_each_child_of_node_scoped(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
child = of_platform_device_create(np, NULL, NULL);
if (!child) {
ret = -ENOMEM;
break;
}
if (!child)
return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
ret = -EINVAL;
break;
return -EINVAL;
}
if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip)) {
ret = -ENOMEM;
break;
}
if (!mbigen_create_device_domain(&child->dev, num_pins, mgn_chip))
return -ENOMEM;
}
if (ret)
of_node_put(np);
return ret;
return 0;
}
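
for_each_child_of_node_scoped() ties the child's refcount to the loop scope, which is what lets the early returns above shed the old break-and-of_node_put() dance. The shape of the pattern:

/* np is put automatically on continue, break, return, or normal exit */
for_each_child_of_node_scoped(parent, np) {
	if (!of_property_read_bool(np, "interrupt-controller"))
		continue;
	/* use np; no of_node_put() needed on any path */
}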
#ifdef CONFIG_ACPI

@@ -325,8 +325,7 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
return ret;
}
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry omap_intc_handle_irq(struct pt_regs *regs)
{
extern unsigned long irq_err_count;
u32 irqnr;

@@ -127,8 +127,7 @@ static int __init sa1100irq_init_devicefs(void)
device_initcall(sa1100irq_init_devicefs);
static asmlinkage void __exception_irq_entry
sa1100_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry sa1100_handle_irq(struct pt_regs *regs)
{
uint32_t icip, icmr, mask;

@@ -128,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
* Keep iterating over all registered FPGA IRQ controllers until there are
* no pending interrupts.
*/
static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
static void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
{
int i, handled;

@@ -144,7 +144,8 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_EIOINTC_STARTING,
CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,

@@ -276,7 +276,7 @@ struct irq_affinity_notify {
#define IRQ_AFFINITY_MAX_SETS 4
/**
* struct irq_affinity - Description for automatic irq affinity assignements
* struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
@@ -594,7 +594,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
void (*action)(struct softirq_action *);
void (*action)(void);
};
asmlinkage void do_softirq(void);
@@ -609,7 +609,7 @@ static inline void do_softirq_post_smp_call_flush(unsigned int unused)
}
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
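
Since no softirq handler ever used its struct softirq_action argument, the callback type collapses to void (*)(void). Registration under the new signature looks like this (the handler name is illustrative; the softirq slots themselves are a fixed build-time enum):

static __latent_entropy void my_bh_handler(void)
{
	/* bottom-half work; runs with interrupts enabled */
}

open_softirq(TASKLET_SOFTIRQ, my_bh_handler);	/* slot chosen for illustration only */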

@@ -991,7 +991,6 @@ void irq_init_desc(unsigned int irq);
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
* @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
@@ -1000,7 +999,6 @@ struct irq_chip_regs {
unsigned long ack;
unsigned long eoi;
unsigned long type;
unsigned long polarity;
};
/**
@@ -1040,8 +1038,6 @@ struct irq_chip_type {
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
* @type_cache: Cached type register
* @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
* @wake_active: Interrupt is marked as an wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
@@ -1068,8 +1064,6 @@ struct irq_chip_generic {
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
u32 type_cache;
u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;

@@ -291,7 +291,12 @@ struct irq_domain_chip_generic_info;
* @hwirq_max: Maximum number of interrupts supported by controller
* @direct_max: Maximum value of direct maps;
* Use ~0 for no limit; 0 for no direct mapping
* @hwirq_base: The first hardware interrupt number (legacy domains only)
* @virq_base: The first Linux interrupt number for legacy domains to
* immediately associate the interrupts after domain creation
* @bus_token: Domain bus token
* @name_suffix: Optional name suffix to avoid collisions when multiple
* domains are added using same fwnode
* @ops: Domain operation callbacks
* @host_data: Controller private data pointer
* @dgc_info: Generic chip information structure pointer used to
@@ -307,7 +312,10 @@ struct irq_domain_info {
unsigned int size;
irq_hw_number_t hwirq_max;
int direct_max;
unsigned int hwirq_base;
unsigned int virq_base;
enum irq_domain_bus_token bus_token;
const char *name_suffix;
const struct irq_domain_ops *ops;
void *host_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

@@ -198,7 +198,7 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
irqd_clr_managed_shutdown(d);
if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
if (!cpumask_intersects(aff, cpu_online_mask)) {
/*
* Catch code which fiddles with enable_irq() on a managed
* and potentially shutdown IRQ. Chained interrupt

@@ -37,7 +37,7 @@ static inline bool irq_needs_fixup(struct irq_data *d)
* has been removed from the online mask already.
*/
if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
!cpumask_intersects(m, cpu_online_mask)) {
/*
* If this happens then there was a missed IRQ fixup at some
* point. Warn about it and enforce fixup.
@@ -110,7 +110,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (maskchip && chip->irq_mask)
chip->irq_mask(d);
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
if (!cpumask_intersects(affinity, cpu_online_mask)) {
/*
* If the interrupt is managed, then shut it down and leave
* the affinity untouched.
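
The two spellings are equivalent tests for "no online CPU left in the mask": cpumask_any_and() scans for a CPU number only to compare it against nr_cpu_ids, while cpumask_intersects() states the intent directly and can stop at the first match.

/* before: cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids */
/* after:  !cpumask_intersects(affinity, cpu_online_mask)           */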

@@ -13,7 +13,6 @@
struct irq_sim_work_ctx {
struct irq_work work;
int irq_base;
unsigned int irq_count;
unsigned long *pending;
struct irq_domain *domain;

@@ -128,72 +128,98 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
static int irq_domain_set_name(struct irq_domain *domain,
const struct fwnode_handle *fwnode,
enum irq_domain_bus_token bus_token)
static int alloc_name(struct irq_domain *domain, char *base, enum irq_domain_bus_token bus_token)
{
if (bus_token == DOMAIN_BUS_ANY)
domain->name = kasprintf(GFP_KERNEL, "%s", base);
else
domain->name = kasprintf(GFP_KERNEL, "%s-%d", base, bus_token);
if (!domain->name)
return -ENOMEM;
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
return 0;
}
static int alloc_fwnode_name(struct irq_domain *domain, const struct fwnode_handle *fwnode,
enum irq_domain_bus_token bus_token, const char *suffix)
{
const char *sep = suffix ? "-" : "";
const char *suf = suffix ? : "";
char *name;
if (bus_token == DOMAIN_BUS_ANY)
name = kasprintf(GFP_KERNEL, "%pfw%s%s", fwnode, sep, suf);
else
name = kasprintf(GFP_KERNEL, "%pfw%s%s-%d", fwnode, sep, suf, bus_token);
if (!name)
return -ENOMEM;
/*
* fwnode paths contain '/', which debugfs is legitimately unhappy
* about. Replace them with ':', which does the trick and is not as
* offensive as '\'...
*/
domain->name = strreplace(name, '/', ':');
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
return 0;
}
static int alloc_unknown_name(struct irq_domain *domain, enum irq_domain_bus_token bus_token)
{
static atomic_t unknown_domains;
struct irqchip_fwid *fwid;
int id = atomic_inc_return(&unknown_domains);
if (bus_token == DOMAIN_BUS_ANY)
domain->name = kasprintf(GFP_KERNEL, "unknown-%d", id);
else
domain->name = kasprintf(GFP_KERNEL, "unknown-%d-%d", id, bus_token);
if (!domain->name)
return -ENOMEM;
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
return 0;
}
static int irq_domain_set_name(struct irq_domain *domain, const struct irq_domain_info *info)
{
enum irq_domain_bus_token bus_token = info->bus_token;
const struct fwnode_handle *fwnode = info->fwnode;
if (is_fwnode_irqchip(fwnode)) {
fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
/*
* The name_suffix is only intended to be used to avoid a name
* collision when multiple domains are created for a single
* device and the name is picked using a real device node.
* (Typical use-case is regmap-IRQ controllers for devices
* providing more than one physical IRQ.) There should be no
* need to use name_suffix with irqchip-fwnode.
*/
if (info->name_suffix)
return -EINVAL;
switch (fwid->type) {
case IRQCHIP_FWNODE_NAMED:
case IRQCHIP_FWNODE_NAMED_ID:
domain->name = bus_token ?
kasprintf(GFP_KERNEL, "%s-%d",
fwid->name, bus_token) :
kstrdup(fwid->name, GFP_KERNEL);
if (!domain->name)
return -ENOMEM;
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
break;
return alloc_name(domain, fwid->name, bus_token);
default:
domain->name = fwid->name;
if (bus_token) {
domain->name = kasprintf(GFP_KERNEL, "%s-%d",
fwid->name, bus_token);
if (!domain->name)
return -ENOMEM;
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
}
break;
if (bus_token != DOMAIN_BUS_ANY)
return alloc_name(domain, fwid->name, bus_token);
}
} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) ||
is_software_node(fwnode)) {
char *name;
/*
* fwnode paths contain '/', which debugfs is legitimately
* unhappy about. Replace them with ':', which does
* the trick and is not as offensive as '\'...
*/
name = bus_token ?
kasprintf(GFP_KERNEL, "%pfw-%d", fwnode, bus_token) :
kasprintf(GFP_KERNEL, "%pfw", fwnode);
if (!name)
return -ENOMEM;
domain->name = strreplace(name, '/', ':');
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
} else if (is_of_node(fwnode) || is_acpi_device_node(fwnode) || is_software_node(fwnode)) {
return alloc_fwnode_name(domain, fwnode, bus_token, info->name_suffix);
}
if (!domain->name) {
if (fwnode)
pr_err("Invalid fwnode type for irqdomain\n");
domain->name = bus_token ?
kasprintf(GFP_KERNEL, "unknown-%d-%d",
atomic_inc_return(&unknown_domains),
bus_token) :
kasprintf(GFP_KERNEL, "unknown-%d",
atomic_inc_return(&unknown_domains));
if (!domain->name)
return -ENOMEM;
domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
}
if (domain->name)
return 0;
return 0;
if (fwnode)
pr_err("Invalid fwnode type for irqdomain\n");
return alloc_unknown_name(domain, bus_token);
}
static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info)
@@ -211,7 +237,7 @@ static struct irq_domain *__irq_domain_create(const struct irq_domain_info *info
if (!domain)
return ERR_PTR(-ENOMEM);
err = irq_domain_set_name(domain, info->fwnode, info->bus_token);
err = irq_domain_set_name(domain, info);
if (err) {
kfree(domain);
return ERR_PTR(err);
@@ -267,13 +293,20 @@ static void irq_domain_free(struct irq_domain *domain)
kfree(domain);
}
/**
* irq_domain_instantiate() - Instantiate a new irq domain data structure
* @info: Domain information pointer pointing to the information for this domain
*
* Return: A pointer to the instantiated irq domain or an ERR_PTR value.
*/
struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
static void irq_domain_instantiate_descs(const struct irq_domain_info *info)
{
if (!IS_ENABLED(CONFIG_SPARSE_IRQ))
return;
if (irq_alloc_descs(info->virq_base, info->virq_base, info->size,
of_node_to_nid(to_of_node(info->fwnode))) < 0) {
pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
info->virq_base);
}
}
static struct irq_domain *__irq_domain_instantiate(const struct irq_domain_info *info,
bool cond_alloc_descs, bool force_associate)
{
struct irq_domain *domain;
int err;
@@ -306,6 +339,19 @@ struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
__irq_domain_publish(domain);
if (cond_alloc_descs && info->virq_base > 0)
irq_domain_instantiate_descs(info);
/*
* Legacy interrupt domains have a fixed Linux interrupt number
* associated. Other interrupt domains can request association by
* providing a Linux interrupt number > 0.
*/
if (force_associate || info->virq_base > 0) {
irq_domain_associate_many(domain, info->virq_base, info->hwirq_base,
info->size - info->hwirq_base);
}
return domain;
err_domain_gc_remove:
@@ -315,6 +361,17 @@ err_domain_free:
irq_domain_free(domain);
return ERR_PTR(err);
}
/**
* irq_domain_instantiate() - Instantiate a new irq domain data structure
* @info: Domain information pointer pointing to the information for this domain
*
* Return: A pointer to the instantiated irq domain or an ERR_PTR value.
*/
struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info)
{
return __irq_domain_instantiate(info, false, false);
}
EXPORT_SYMBOL_GPL(irq_domain_instantiate);
/**
@@ -413,28 +470,13 @@ struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
.fwnode = fwnode,
.size = size,
.hwirq_max = size,
.virq_base = first_irq,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *domain;
struct irq_domain *domain = __irq_domain_instantiate(&info, true, false);
domain = irq_domain_instantiate(&info);
if (IS_ERR(domain))
return NULL;
if (first_irq > 0) {
if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
/* attempt to allocated irq_descs */
int rc = irq_alloc_descs(first_irq, first_irq, size,
of_node_to_nid(to_of_node(fwnode)));
if (rc < 0)
pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
first_irq);
}
irq_domain_associate_many(domain, first_irq, 0, size);
}
return domain;
return IS_ERR(domain) ? NULL : domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_simple);
@@ -476,18 +518,14 @@ struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
.fwnode = fwnode,
.size = first_hwirq + size,
.hwirq_max = first_hwirq + size,
.hwirq_base = first_hwirq,
.virq_base = first_irq,
.ops = ops,
.host_data = host_data,
};
struct irq_domain *domain;
struct irq_domain *domain = __irq_domain_instantiate(&info, false, true);
domain = irq_domain_instantiate(&info);
if (IS_ERR(domain))
return NULL;
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
return domain;
return IS_ERR(domain) ? NULL : domain;
}
EXPORT_SYMBOL_GPL(irq_domain_create_legacy);
@@ -1365,7 +1403,7 @@ static int irq_domain_trim_hierarchy(unsigned int virq)
tail = NULL;
/* The first entry must have a valid irqchip */
if (!irq_data->chip || IS_ERR(irq_data->chip))
if (IS_ERR_OR_NULL(irq_data->chip))
return -EINVAL;
/*

@@ -218,21 +218,20 @@ static void irq_validate_effective_affinity(struct irq_data *data)
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif
static DEFINE_PER_CPU(struct cpumask, __tmp_mask);
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct cpumask *tmp_mask = this_cpu_ptr(&__tmp_mask);
struct irq_desc *desc = irq_data_to_desc(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
const struct cpumask *prog_mask;
int ret;
static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
static struct cpumask tmp_mask;
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
raw_spin_lock(&tmp_mask_lock);
/*
* If this is a managed interrupt and housekeeping is enabled on
* it check whether the requested affinity mask intersects with
@@ -258,11 +257,11 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
cpumask_and(&tmp_mask, mask, hk_mask);
if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
cpumask_and(tmp_mask, mask, hk_mask);
if (!cpumask_intersects(tmp_mask, cpu_online_mask))
prog_mask = mask;
else
prog_mask = &tmp_mask;
prog_mask = tmp_mask;
} else {
prog_mask = mask;
}
@@ -272,16 +271,14 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
* unless we are being asked to force the affinity (in which
* case we do as we are told).
*/
cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
if (!force && !cpumask_empty(&tmp_mask))
ret = chip->irq_set_affinity(data, &tmp_mask, force);
cpumask_and(tmp_mask, prog_mask, cpu_online_mask);
if (!force && !cpumask_empty(tmp_mask))
ret = chip->irq_set_affinity(data, tmp_mask, force);
else if (force)
ret = chip->irq_set_affinity(data, mask, force);
else
ret = -EINVAL;
raw_spin_unlock(&tmp_mask_lock);
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
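
The scratch mask moves from one static cpumask behind a raw spinlock to a per-CPU variable: irq_do_set_affinity() runs with the descriptor lock held and interrupts disabled, so the per-CPU copy cannot be clobbered by a concurrent caller on the same CPU, and a global serialization point disappears. The pattern, reduced to its core (illustrative name):

static DEFINE_PER_CPU(struct cpumask, scratch_mask);

/* caller context: desc->lock held, irqs off */
struct cpumask *tmp = this_cpu_ptr(&scratch_mask);
cpumask_and(tmp, requested_mask, cpu_online_mask);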

@@ -26,7 +26,7 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
* The outgoing CPU might be the last online target in a pending
* interrupt move. If that's the case clear the pending move bit.
*/
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
irqd_clr_move_pending(data);
return false;
}
@@ -74,7 +74,7 @@ void irq_move_masked_irq(struct irq_data *idata)
* For correct operation this depends on the caller
* masking the irqs.
*/
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
int ret;
ret = irq_do_set_affinity(data, desc->pending_mask, false);

@@ -82,7 +82,7 @@ static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
desc->dev = dev;
desc->nvec_used = nvec;
if (affinity) {
desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
desc->affinity = kmemdup_array(affinity, nvec, sizeof(*desc->affinity), GFP_KERNEL);
if (!desc->affinity) {
kfree(desc);
return NULL;
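
kmemdup_array() is the overflow-checked spelling of the same copy: the nvec * sizeof(*desc->affinity) multiplication happens inside the helper with overflow checking instead of silently wrapping in the caller.

/* before: kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL)      */
/* after:  kmemdup_array(affinity, nvec, sizeof(*desc->affinity), GFP_KERNEL) */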

@@ -52,10 +52,8 @@ static int show_irq_affinity(int type, struct seq_file *m)
case AFFINITY:
case AFFINITY_LIST:
mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (irqd_is_setaffinity_pending(&desc->irq_data))
mask = desc->pending_mask;
#endif
if (irq_move_pending(&desc->irq_data))
mask = irq_desc_get_pending_mask(desc);
break;
case EFFECTIVE:
case EFFECTIVE_LIST:
@@ -142,7 +140,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
int err;
if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
return -EIO;
return -EPERM;
if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
@@ -362,8 +360,13 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
goto out_unlock;
#ifdef CONFIG_SMP
umode_t umode = S_IRUGO;
if (irq_can_set_affinity_usr(desc->irq_data.irq))
umode |= S_IWUSR;
/* create /proc/irq/<irq>/smp_affinity */
proc_create_data("smp_affinity", 0644, desc->dir,
proc_create_data("smp_affinity", umode, desc->dir,
&irq_affinity_proc_ops, irqp);
/* create /proc/irq/<irq>/affinity_hint */
@@ -371,7 +374,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
irq_affinity_hint_proc_show, irqp);
/* create /proc/irq/<irq>/smp_affinity_list */
proc_create_data("smp_affinity_list", 0644, desc->dir,
proc_create_data("smp_affinity_list", umode, desc->dir,
&irq_affinity_list_proc_ops, irqp);
proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,

@@ -105,7 +105,7 @@ static inline bool rcu_reclaim_tiny(struct rcu_head *head)
}
/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
static __latent_entropy void rcu_process_callbacks(void)
{
struct rcu_head *next, *list;
unsigned long flags;

@@ -2855,7 +2855,7 @@ static __latent_entropy void rcu_core(void)
queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
}
static void rcu_core_si(struct softirq_action *h)
static void rcu_core_si(void)
{
rcu_core();
}

@@ -12483,7 +12483,7 @@ out:
* - indirectly from a remote scheduler_tick() for NOHZ idle balancing
* through the SMP cross-call nohz_csd_func()
*/
static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
static __latent_entropy void sched_balance_softirq(void)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance;

@@ -551,7 +551,7 @@ restart:
kstat_incr_softirqs_this_cpu(vec_nr);
trace_softirq_entry(vec_nr);
h->action(h);
h->action();
trace_softirq_exit(vec_nr);
if (unlikely(prev_count != preempt_count())) {
pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
@@ -700,7 +700,7 @@ void __raise_softirq_irqoff(unsigned int nr)
or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
void open_softirq(int nr, void (*action)(void))
{
softirq_vec[nr].action = action;
}
@@ -760,8 +760,7 @@ static bool tasklet_clear_sched(struct tasklet_struct *t)
return false;
}
static void tasklet_action_common(struct softirq_action *a,
struct tasklet_head *tl_head,
static void tasklet_action_common(struct tasklet_head *tl_head,
unsigned int softirq_nr)
{
struct tasklet_struct *list;
@@ -805,16 +804,16 @@ static void tasklet_action_common(struct softirq_action *a,
}
}
static __latent_entropy void tasklet_action(struct softirq_action *a)
static __latent_entropy void tasklet_action(void)
{
workqueue_softirq_action(false);
tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
tasklet_action_common(this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}
static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
static __latent_entropy void tasklet_hi_action(void)
{
workqueue_softirq_action(true);
tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
tasklet_action_common(this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
void tasklet_setup(struct tasklet_struct *t,

@@ -1757,7 +1757,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
}
}
static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
static __latent_entropy void hrtimer_run_softirq(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
unsigned long flags;

@@ -2440,7 +2440,7 @@ static void run_timer_base(int index)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
static __latent_entropy void run_timer_softirq(void)
{
run_timer_base(BASE_LOCAL);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {

@@ -75,7 +75,7 @@ void irq_poll_complete(struct irq_poll *iop)
}
EXPORT_SYMBOL(irq_poll_complete);
static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
static void __latent_entropy irq_poll_softirq(void)
{
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;

@@ -5248,7 +5248,7 @@ out:
}
EXPORT_SYMBOL(netif_rx);
static __latent_entropy void net_tx_action(struct softirq_action *h)
static __latent_entropy void net_tx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -6921,7 +6921,7 @@ static int napi_threaded_poll(void *data)
return 0;
}
static __latent_entropy void net_rx_action(struct softirq_action *h)
static __latent_entropy void net_rx_action(void)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies +