Merge tag 'irq-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull interrupt subsystem updates from Thomas Gleixner:
 "Tree wide:

   - Make nr_irqs static to the core code and provide accessor functions
     to remove existing and prevent future aliasing problems with local
     variables or function arguments of the same name.

  Core code:

   - Prevent freeing an interrupt in the devres code which is not
     managed by devres in the first place.

   - Use seq_put_decimal_ull_width() for decimal values output in
     /proc/interrupts which increases performance significantly as it
     avoids parsing the format strings over and over.

   - Optimize raising the timer and hrtimer soft interrupts by using the
     'set bit only' variants instead of the combined version which
     checks whether ksoftirqd should be woken up. The latter is a
     pointless exercise as both soft interrupts are raised in the
     context of the timer interrupt and therefore never wake up
     ksoftirqd.

   - Delegate timer/hrtimer soft interrupt processing to a dedicated
     thread on RT.

     Timer and hrtimer soft interrupts are always processed in ksoftirqd
     on RT enabled kernels. This can lead to high latencies when other
     soft interrupts are delegated to ksoftirqd as well.

     The separate thread allows them to run separately under an RT
     scheduling policy to reduce the latency overhead.

  Drivers:

   - New drivers or extensions of existing drivers to support Renesas
     RZ/V2H(P), Aspeed AST27XX, T-HEAD C900 and ATMEL sam9x7 interrupt
     chips

   - Support for multi-cluster GICs on MIPS.

     MIPS CPUs can come with multiple CPU clusters, where each CPU
     cluster has its own GIC (Generic Interrupt Controller). This
     requires accessing the GIC of a remote cluster through a redirect
     register block.

     This is encapsulated into a set of helper functions to keep the
     complexity out of the actual code paths which handle the GIC
     details.

   - Support for encrypted guests in the ARM GICV3 ITS driver

     The ITS page needs to be shared with the hypervisor and therefore
     must be decrypted.

   - Small cleanups and fixes all over the place"
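
A minimal sketch of what the nr_irqs conversion means for callers: readers take a
snapshot through irq_get_nr_irqs() and architectures set the value via
irq_set_nr_irqs(), both visible in the diffs below. The walker function and the
header placement of the accessors are assumptions for illustration.

    #include <linux/init.h>
    #include <linux/irq.h>
    #include <linux/irqdesc.h>
    #include <linux/irqnr.h>

    /* Hypothetical reader: snapshot the limit instead of touching a global. */
    static unsigned int example_count_requested_irqs(void)
    {
            unsigned int i, count = 0;
            const unsigned int nr = irq_get_nr_irqs();

            for (i = 0; i < nr; i++) {
                    struct irq_desc *desc = irq_to_desc(i);

                    if (desc && desc->action)
                            count++;
            }
            return count;
    }

    /* Architectures that size the interrupt space return the value they set. */
    int __init example_arch_probe_nr_irqs(void)
    {
            return irq_set_nr_irqs(NR_IRQS);
    }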
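
The devres fix above rests on the rule that devm_free_irq() may only be used on an
interrupt that was requested with devm_request_irq() for the same device and dev_id
cookie. A hedged sketch of the intended pairing; the device, handler and function
names are made up:

    #include <linux/device.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int demo_bind(struct device *dev, unsigned int irq)
    {
            /* Managed request: released automatically when the driver detaches. */
            return devm_request_irq(dev, irq, demo_handler, 0, "demo", dev);
    }

    static void demo_release_early(struct device *dev, unsigned int irq)
    {
            /*
             * Valid only because the interrupt was requested through devres with
             * the same dev/dev_id. Handing over an interrupt obtained with plain
             * request_irq() is the case the core no longer tries to free.
             */
            devm_free_irq(dev, irq, dev);
    }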
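
The /proc/interrupts change amounts to replacing a per-value format string with the
dedicated seq_file helper. A small before/after sketch; the demo_* functions and the
counter argument are stand-ins, seq_put_decimal_ull_width() is the real helper:

    #include <linux/seq_file.h>

    /* Old pattern: the format string is re-parsed for every value printed. */
    static void demo_show_count_old(struct seq_file *p, unsigned long cnt)
    {
            seq_printf(p, " %10lu", cnt);
    }

    /* New pattern: emit the delimiter and a width-padded decimal directly. */
    static void demo_show_count_new(struct seq_file *p, unsigned long cnt)
    {
            seq_put_decimal_ull_width(p, " ", cnt, 10);
    }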
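
The softirq raise optimisation relies on the caller already running in hard interrupt
context. An illustrative sketch, not the actual timer code; only the softirq API calls
are real:

    #include <linux/interrupt.h>

    /* Invoked from the timer hardirq with interrupts disabled (illustrative). */
    static void demo_timer_tick(bool expired_timers)
    {
            if (!expired_timers)
                    return;

            /*
             * raise_softirq_irqoff() would additionally decide whether ksoftirqd
             * needs a wakeup. In hardirq context the pending softirq is handled
             * on irq_exit() anyway, so setting the pending bit is sufficient.
             */
            __raise_softirq_irqoff(TIMER_SOFTIRQ);
    }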
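
The multi-cluster MIPS GIC support funnels remote-cluster register access through a
lock/redirect/unlock pattern. The shape below is lifted from the irq-mips-gic.c hunks
further down, using the mask operation as one example; the demo_* wrapper is made up
and assumes it lives in that driver next to gic_irq_lock_cluster():

    #include <linux/irq.h>
    #include <asm/mips-cm.h>
    #include <asm/mips-gic.h>

    static void demo_gic_mask(struct irq_data *d, unsigned int intr)
    {
            if (gic_irq_lock_cluster(d)) {
                    /* Affine to a remote cluster: go through the redirect block. */
                    write_gic_redir_rmask(intr);
                    mips_cm_unlock_other();
            } else {
                    /* Local cluster: plain register access, no indirection. */
                    write_gic_rmask(intr);
            }
    }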
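
For the encrypted-guest ITS support, the key step is switching freshly allocated ITS
pages to decrypted (shared with the hypervisor) before use and back to encrypted
before freeing. A condensed version of the allocation helpers added in the GICv3 ITS
hunks below; the demo_* names are placeholders:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/set_memory.h>

    static struct page *demo_its_alloc_pages(int node, gfp_t gfp, unsigned int order)
    {
            struct page *page = alloc_pages_node(node, gfp, order);

            if (!page)
                    return NULL;

            /*
             * Share the pages with the hypervisor. If that fails the page state
             * is unknown, so the pages are leaked rather than reused.
             */
            if (set_memory_decrypted((unsigned long)page_address(page), 1 << order))
                    return NULL;

            return page;
    }

    static void demo_its_free_pages(void *addr, unsigned int order)
    {
            /* Re-encrypt before handing the pages back to the allocator. */
            if (set_memory_encrypted((unsigned long)addr, 1 << order))
                    return;
            free_pages((unsigned long)addr, order);
    }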

* tag 'irq-core-2024-11-18' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits)
  irqchip/riscv-aplic: Prevent crash when MSI domain is missing
  genirq/proc: Use seq_put_decimal_ull_width() for decimal values
  softirq: Use a dedicated thread for timer wakeups on PREEMPT_RT.
  timers: Use __raise_softirq_irqoff() to raise the softirq.
  hrtimer: Use __raise_softirq_irqoff() to raise the softirq
  riscv: defconfig: Enable T-HEAD C900 ACLINT SSWI drivers
  irqchip: Add T-HEAD C900 ACLINT SSWI driver
  dt-bindings: interrupt-controller: Add T-HEAD C900 ACLINT SSWI device
  irqchip/stm32mp-exti: Use of_property_present() for non-boolean properties
  irqchip/mips-gic: Fix selection of GENERIC_IRQ_EFFECTIVE_AFF_MASK
  irqchip/mips-gic: Prevent indirect access to clusters without CPU cores
  irqchip/mips-gic: Multi-cluster support
  irqchip/mips-gic: Setup defaults in each cluster
  irqchip/mips-gic: Support multi-cluster in for_each_online_cpu_gic()
  irqchip/mips-gic: Replace open coded online CPU iterations
  genirq/irqdesc: Use str_enabled_disabled() helper in wakeup_show()
  genirq/devres: Don't free interrupt which is not managed by devres
  irqchip/gic-v3-its: Fix over allocation in itt_alloc_pool()
  irqchip/aspeed-intc: Add AST27XX INTC support
  dt-bindings: interrupt-controller: Add support for ASPEED AST27XX INTC
  ...
Linus Torvalds 2024-11-19 15:54:19 -08:00
commit 5c2b050848
52 changed files with 1945 additions and 134 deletions

View File

@ -0,0 +1,86 @@
# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/interrupt-controller/aspeed,ast2700-intc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Aspeed AST2700 Interrupt Controller
description:
This interrupt controller hardware is second level interrupt controller that
is hooked to a parent interrupt controller. It's useful to combine multiple
interrupt sources into 1 interrupt to parent interrupt controller.
maintainers:
- Kevin Chen <kevin_chen@aspeedtech.com>
properties:
compatible:
enum:
- aspeed,ast2700-intc-ic
reg:
maxItems: 1
interrupt-controller: true
'#interrupt-cells':
const: 2
description:
The first cell is the IRQ number, the second cell is the trigger
type as defined in interrupt.txt in this directory.
interrupts:
maxItems: 6
description: |
Depend to which INTC0 or INTC1 used.
INTC0 and INTC1 are two kinds of interrupt controller with enable and raw
status registers for use.
INTC0 is used to assert GIC if interrupt in INTC1 asserted.
INTC1 is used to assert INTC0 if interrupt of modules asserted.
+-----+   +-------+     +---------+---module0
| GIC |---| INTC0 |--+--| INTC1_0 |---module2
|     |   |       |  |  |         |---...
+-----+   +-------+  |  +---------+---module31
                     |
                     |  +---------+---module0
                     +--| INTC1_1 |---module2
                     |  |         |---...
                     |  +---------+---module31
                    ...
                     |  +---------+---module0
                     +--| INTC1_5 |---module2
                        |         |---...
                        +---------+---module31
required:
- compatible
- reg
- interrupt-controller
- '#interrupt-cells'
- interrupts
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
bus {
#address-cells = <2>;
#size-cells = <2>;
interrupt-controller@12101b00 {
compatible = "aspeed,ast2700-intc-ic";
reg = <0 0x12101b00 0 0x10>;
#interrupt-cells = <2>;
interrupt-controller;
interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
};
};

View File

@ -23,6 +23,7 @@ properties:
- atmel,sama5d3-aic
- atmel,sama5d4-aic
- microchip,sam9x60-aic
- microchip,sam9x7-aic
reg:
maxItems: 1

View File

@ -0,0 +1,278 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/interrupt-controller/renesas,rzv2h-icu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas RZ/V2H(P) Interrupt Control Unit
maintainers:
- Fabrizio Castro <fabrizio.castro.jz@renesas.com>
- Geert Uytterhoeven <geert+renesas@glider.be>
allOf:
- $ref: /schemas/interrupt-controller.yaml#
description:
The Interrupt Control Unit (ICU) handles external interrupts (NMI, IRQ, and
TINT), error interrupts, DMAC requests, GPT interrupts, and internal
interrupts.
properties:
compatible:
const: renesas,r9a09g057-icu # RZ/V2H(P)
'#interrupt-cells':
description: The first cell is the SPI number of the NMI or the
PORT_IRQ[0-15] interrupt, as per user manual. The second cell is used to
specify the flag.
const: 2
'#address-cells':
const: 0
interrupt-controller: true
reg:
maxItems: 1
interrupts:
minItems: 58
items:
- description: NMI interrupt
- description: PORT_IRQ0 interrupt
- description: PORT_IRQ1 interrupt
- description: PORT_IRQ2 interrupt
- description: PORT_IRQ3 interrupt
- description: PORT_IRQ4 interrupt
- description: PORT_IRQ5 interrupt
- description: PORT_IRQ6 interrupt
- description: PORT_IRQ7 interrupt
- description: PORT_IRQ8 interrupt
- description: PORT_IRQ9 interrupt
- description: PORT_IRQ10 interrupt
- description: PORT_IRQ11 interrupt
- description: PORT_IRQ12 interrupt
- description: PORT_IRQ13 interrupt
- description: PORT_IRQ14 interrupt
- description: PORT_IRQ15 interrupt
- description: GPIO interrupt, TINT0
- description: GPIO interrupt, TINT1
- description: GPIO interrupt, TINT2
- description: GPIO interrupt, TINT3
- description: GPIO interrupt, TINT4
- description: GPIO interrupt, TINT5
- description: GPIO interrupt, TINT6
- description: GPIO interrupt, TINT7
- description: GPIO interrupt, TINT8
- description: GPIO interrupt, TINT9
- description: GPIO interrupt, TINT10
- description: GPIO interrupt, TINT11
- description: GPIO interrupt, TINT12
- description: GPIO interrupt, TINT13
- description: GPIO interrupt, TINT14
- description: GPIO interrupt, TINT15
- description: GPIO interrupt, TINT16
- description: GPIO interrupt, TINT17
- description: GPIO interrupt, TINT18
- description: GPIO interrupt, TINT19
- description: GPIO interrupt, TINT20
- description: GPIO interrupt, TINT21
- description: GPIO interrupt, TINT22
- description: GPIO interrupt, TINT23
- description: GPIO interrupt, TINT24
- description: GPIO interrupt, TINT25
- description: GPIO interrupt, TINT26
- description: GPIO interrupt, TINT27
- description: GPIO interrupt, TINT28
- description: GPIO interrupt, TINT29
- description: GPIO interrupt, TINT30
- description: GPIO interrupt, TINT31
- description: Software interrupt, INTA55_0
- description: Software interrupt, INTA55_1
- description: Software interrupt, INTA55_2
- description: Software interrupt, INTA55_3
- description: Error interrupt to CA55
- description: GTCCRA compare match/input capture (U0)
- description: GTCCRB compare match/input capture (U0)
- description: GTCCRA compare match/input capture (U1)
- description: GTCCRB compare match/input capture (U1)
interrupt-names:
minItems: 58
items:
- const: nmi
- const: port_irq0
- const: port_irq1
- const: port_irq2
- const: port_irq3
- const: port_irq4
- const: port_irq5
- const: port_irq6
- const: port_irq7
- const: port_irq8
- const: port_irq9
- const: port_irq10
- const: port_irq11
- const: port_irq12
- const: port_irq13
- const: port_irq14
- const: port_irq15
- const: tint0
- const: tint1
- const: tint2
- const: tint3
- const: tint4
- const: tint5
- const: tint6
- const: tint7
- const: tint8
- const: tint9
- const: tint10
- const: tint11
- const: tint12
- const: tint13
- const: tint14
- const: tint15
- const: tint16
- const: tint17
- const: tint18
- const: tint19
- const: tint20
- const: tint21
- const: tint22
- const: tint23
- const: tint24
- const: tint25
- const: tint26
- const: tint27
- const: tint28
- const: tint29
- const: tint30
- const: tint31
- const: int-ca55-0
- const: int-ca55-1
- const: int-ca55-2
- const: int-ca55-3
- const: icu-error-ca55
- const: gpt-u0-gtciada
- const: gpt-u0-gtciadb
- const: gpt-u1-gtciada
- const: gpt-u1-gtciadb
clocks:
maxItems: 1
power-domains:
maxItems: 1
resets:
maxItems: 1
required:
- compatible
- reg
- '#interrupt-cells'
- '#address-cells'
- interrupt-controller
- interrupts
- interrupt-names
- clocks
- power-domains
- resets
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
icu: interrupt-controller@10400000 {
compatible = "renesas,r9a09g057-icu";
reg = <0x10400000 0x10000>;
#interrupt-cells = <2>;
#address-cells = <0>;
interrupt-controller;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 262 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nmi",
"port_irq0", "port_irq1", "port_irq2",
"port_irq3", "port_irq4", "port_irq5",
"port_irq6", "port_irq7", "port_irq8",
"port_irq9", "port_irq10", "port_irq11",
"port_irq12", "port_irq13", "port_irq14",
"port_irq15",
"tint0", "tint1", "tint2", "tint3",
"tint4", "tint5", "tint6", "tint7",
"tint8", "tint9", "tint10", "tint11",
"tint12", "tint13", "tint14", "tint15",
"tint16", "tint17", "tint18", "tint19",
"tint20", "tint21", "tint22", "tint23",
"tint24", "tint25", "tint26", "tint27",
"tint28", "tint29", "tint30", "tint31",
"int-ca55-0", "int-ca55-1",
"int-ca55-2", "int-ca55-3",
"icu-error-ca55",
"gpt-u0-gtciada", "gpt-u0-gtciadb",
"gpt-u1-gtciada", "gpt-u1-gtciadb";
clocks = <&cpg CPG_MOD 0x5>;
power-domains = <&cpg>;
resets = <&cpg 0x36>;
};

View File

@ -0,0 +1,58 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/interrupt-controller/thead,c900-aclint-sswi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: T-HEAD C900 ACLINT Supervisor-level Software Interrupt Device
maintainers:
- Inochi Amaoto <inochiama@outlook.com>
description:
The SSWI device is a part of the THEAD ACLINT device. It provides
supervisor-level IPI functionality for a set of HARTs on a THEAD
platform. It provides a register to set an IPI (SETSSIP) for each
HART connected to the SSWI device.
properties:
compatible:
items:
- enum:
- sophgo,sg2044-aclint-sswi
- const: thead,c900-aclint-sswi
reg:
maxItems: 1
"#interrupt-cells":
const: 0
interrupt-controller: true
interrupts-extended:
minItems: 1
maxItems: 4095
additionalProperties: false
required:
- compatible
- reg
- "#interrupt-cells"
- interrupt-controller
- interrupts-extended
examples:
- |
interrupt-controller@94000000 {
compatible = "sophgo,sg2044-aclint-sswi", "thead,c900-aclint-sswi";
reg = <0x94000000 0x00004000>;
#interrupt-cells = <0>;
interrupt-controller;
interrupts-extended = <&cpu1intc 1>,
<&cpu2intc 1>,
<&cpu3intc 1>,
<&cpu4intc 1>;
};
...

View File

@ -111,7 +111,7 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
* Some hardware gives randomly wrong interrupts. Rather
* than crashing, do something sensible.
*/
if (unlikely(!irq || irq >= nr_irqs))
if (unlikely(!irq || irq >= irq_get_nr_irqs()))
desc = NULL;
else
desc = irq_to_desc(irq);
@ -151,7 +151,6 @@ void __init init_IRQ(void)
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
return nr_irqs;
return irq_set_nr_irqs(machine_desc->nr_irqs ? : NR_IRQS);
}
#endif

View File

@ -90,6 +90,95 @@
#size-cells = <2>;
ranges;
icu: interrupt-controller@10400000 {
compatible = "renesas,r9a09g057-icu";
reg = <0 0x10400000 0 0x10000>;
#interrupt-cells = <2>;
#address-cells = <0>;
interrupt-controller;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 435 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 436 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 437 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 439 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 262 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 263 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 264 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 265 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nmi",
"port_irq0", "port_irq1", "port_irq2",
"port_irq3", "port_irq4", "port_irq5",
"port_irq6", "port_irq7", "port_irq8",
"port_irq9", "port_irq10", "port_irq11",
"port_irq12", "port_irq13", "port_irq14",
"port_irq15",
"tint0", "tint1", "tint2", "tint3",
"tint4", "tint5", "tint6", "tint7",
"tint8", "tint9", "tint10", "tint11",
"tint12", "tint13", "tint14", "tint15",
"tint16", "tint17", "tint18", "tint19",
"tint20", "tint21", "tint22", "tint23",
"tint24", "tint25", "tint26", "tint27",
"tint28", "tint29", "tint30", "tint31",
"int-ca55-0", "int-ca55-1",
"int-ca55-2", "int-ca55-3",
"icu-error-ca55",
"gpt-u0-gtciada", "gpt-u0-gtciadb",
"gpt-u1-gtciada", "gpt-u1-gtciadb";
clocks = <&cpg CPG_MOD 0x5>;
power-domains = <&cpg>;
resets = <&cpg 0x36>;
};
pinctrl: pinctrl@10410000 {
compatible = "renesas,r9a09g057-pinctrl";
reg = <0 0x10410000 0 0x10000>;
@ -99,6 +188,7 @@
gpio-ranges = <&pinctrl 0 0 96>;
#interrupt-cells = <2>;
interrupt-controller;
interrupt-parent = <&icu>;
power-domains = <&cpg>;
resets = <&cpg 0xa5>, <&cpg 0xa6>;
};

View File

@ -92,9 +92,9 @@ int __init arch_probe_nr_irqs(void)
int nr_io_pics = bitmap_weight(loongson_sysconf.cores_io_master, NR_CPUS);
if (!cpu_has_avecint)
nr_irqs = (64 + NR_VECTORS * nr_io_pics);
irq_set_nr_irqs(64 + NR_VECTORS * nr_io_pics);
else
nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
irq_set_nr_irqs(64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics));
return NR_IRQS_LEGACY;
}

View File

@ -112,7 +112,7 @@ static void axon_msi_cascade(struct irq_desc *desc)
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
if (msi < irq_get_nr_irqs() && irq_get_chip_data(msi) == msic) {
generic_handle_irq(msi);
msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
} else {

View File

@ -256,6 +256,7 @@ CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_PM_DEVFREQ=y
CONFIG_IIO=y
CONFIG_THEAD_C900_ACLINT_SSWI=y
CONFIG_PHY_SUN4I_USB=m
CONFIG_PHY_STARFIVE_JH7110_DPHY_RX=m
CONFIG_PHY_STARFIVE_JH7110_PCIE=m

View File

@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
goto out;
}
if (index < nr_irqs) {
if (index < irq_get_nr_irqs()) {
show_msi_interrupt(p, index);
goto out;
}

View File

@ -1171,7 +1171,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr, nr_irqs);
acpi_parse_int_src_ovr,
irq_get_nr_irqs());
if (count < 0) {
pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
@ -1191,7 +1192,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
mp_config_acpi_legacy_irqs();
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src, nr_irqs);
acpi_parse_nmi_src,
irq_get_nr_irqs());
if (count < 0) {
pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */

View File

@ -712,8 +712,8 @@ int __init arch_probe_nr_irqs(void)
{
int nr;
if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
nr_irqs = NR_VECTORS * nr_cpu_ids;
if (irq_get_nr_irqs() > NR_VECTORS * nr_cpu_ids)
irq_set_nr_irqs(NR_VECTORS * nr_cpu_ids);
nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
@ -725,8 +725,8 @@ int __init arch_probe_nr_irqs(void)
else
nr += gsi_top * 16;
#endif
if (nr < nr_irqs)
nr_irqs = nr;
if (nr < irq_get_nr_irqs())
irq_set_nr_irqs(nr);
/*
* We don't know if PIC is present at this point so we need to do

View File

@ -162,6 +162,7 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
const unsigned int nr_irqs = irq_get_nr_irqs();
unsigned long v;
int irq, gsi;
struct hpet_timer __iomem *timer;

View File

@ -258,6 +258,13 @@ config RENESAS_RZG2L_IRQC
Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller
for external devices.
config RENESAS_RZV2H_ICU
bool "Renesas RZ/V2H(P) ICU support" if COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN_HIERARCHY
help
Enable support for the Renesas RZ/V2H(P) Interrupt Control Unit (ICU)
config SL28CPLD_INTC
bool "Kontron sl28cpld IRQ controller"
depends on MFD_SL28CPLD=y || COMPILE_TEST
@ -338,6 +345,7 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select GENERIC_IRQ_IPI if SMP
select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
@ -604,6 +612,18 @@ config STARFIVE_JH8100_INTC
If you don't know what to do here, say Y.
config THEAD_C900_ACLINT_SSWI
bool "THEAD C9XX ACLINT S-mode IPI Interrupt Controller"
depends on RISCV
depends on SMP
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_IPI_MUX
help
This enables support for T-HEAD specific ACLINT SSWI device
support.
If you don't know what to do here, say Y.
config EXYNOS_IRQ_COMBINER
bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST

View File

@ -51,6 +51,7 @@ obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_RENESAS_RZA1_IRQC) += irq-renesas-rza1.o
obj-$(CONFIG_RENESAS_RZG2L_IRQC) += irq-renesas-rzg2l.o
obj-$(CONFIG_RENESAS_RZV2H_ICU) += irq-renesas-rzv2h.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
@ -84,6 +85,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-intc.o
obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
@ -101,6 +103,7 @@ obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o
obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_THEAD_C900_ACLINT_SSWI) += irq-thead-c900-aclint-sswi.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o

View File

@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Aspeed Interrupt Controller.
*
* Copyright (C) 2023 ASPEED Technology Inc.
*/
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#define INTC_INT_ENABLE_REG 0x00
#define INTC_INT_STATUS_REG 0x04
#define INTC_IRQS_PER_WORD 32
struct aspeed_intc_ic {
void __iomem *base;
raw_spinlock_t gic_lock;
raw_spinlock_t intc_lock;
struct irq_domain *irq_domain;
};
static void aspeed_intc_ic_irq_handler(struct irq_desc *desc)
{
struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
scoped_guard(raw_spinlock, &intc_ic->gic_lock) {
unsigned long bit, status;
status = readl(intc_ic->base + INTC_INT_STATUS_REG);
for_each_set_bit(bit, &status, INTC_IRQS_PER_WORD) {
generic_handle_domain_irq(intc_ic->irq_domain, bit);
writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG);
}
}
chained_irq_exit(chip, desc);
}
static void aspeed_intc_irq_mask(struct irq_data *data)
{
struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
unsigned int mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq);
guard(raw_spinlock)(&intc_ic->intc_lock);
writel(mask, intc_ic->base + INTC_INT_ENABLE_REG);
}
static void aspeed_intc_irq_unmask(struct irq_data *data)
{
struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
unsigned int unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq);
guard(raw_spinlock)(&intc_ic->intc_lock);
writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG);
}
static struct irq_chip aspeed_intc_chip = {
.name = "ASPEED INTC",
.irq_mask = aspeed_intc_irq_mask,
.irq_unmask = aspeed_intc_irq_unmask,
};
static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = {
.map = aspeed_intc_ic_map_irq_domain,
};
static int __init aspeed_intc_ic_of_init(struct device_node *node,
struct device_node *parent)
{
struct aspeed_intc_ic *intc_ic;
int irq, i, ret = 0;
intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL);
if (!intc_ic)
return -ENOMEM;
intc_ic->base = of_iomap(node, 0);
if (!intc_ic->base) {
pr_err("Failed to iomap intc_ic base\n");
ret = -ENOMEM;
goto err_free_ic;
}
writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD,
&aspeed_intc_ic_irq_domain_ops, intc_ic);
if (!intc_ic->irq_domain) {
ret = -ENOMEM;
goto err_iounmap;
}
raw_spin_lock_init(&intc_ic->gic_lock);
raw_spin_lock_init(&intc_ic->intc_lock);
/* Check all the irq numbers valid. If not, unmaps all the base and frees the data. */
for (i = 0; i < of_irq_count(node); i++) {
irq = irq_of_parse_and_map(node, i);
if (!irq) {
pr_err("Failed to get irq number\n");
ret = -EINVAL;
goto err_iounmap;
}
}
for (i = 0; i < of_irq_count(node); i++) {
irq = irq_of_parse_and_map(node, i);
irq_set_chained_handler_and_data(irq, aspeed_intc_ic_irq_handler, intc_ic);
}
return 0;
err_iounmap:
iounmap(intc_ic->base);
err_free_ic:
kfree(intc_ic);
return ret;
}
IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init);

View File

@ -319,6 +319,7 @@ static const struct of_device_id aic5_irq_fixups[] __initconst = {
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
{ .compatible = "microchip,sam9x60", .data = sam9x60_aic_irq_fixup },
{ .compatible = "microchip,sam9x7", .data = sam9x60_aic_irq_fixup },
{ /* sentinel */ },
};
@ -405,3 +406,11 @@ static int __init sam9x60_aic5_of_init(struct device_node *node,
return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
}
IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
#define NR_SAM9X7_IRQS 70
static int __init sam9x7_aic5_of_init(struct device_node *node, struct device_node *parent)
{
return aic5_of_init(node, parent, NR_SAM9X7_IRQS);
}
IRQCHIP_DECLARE(sam9x7_aic5, "microchip,sam9x7-aic", sam9x7_aic5_of_init);

View File

@ -12,12 +12,14 @@
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mem_encrypt.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
@ -27,6 +29,7 @@
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
@ -164,6 +167,7 @@ struct its_device {
struct its_node *its;
struct event_lpi_map event_map;
void *itt;
u32 itt_sz;
u32 nr_ites;
u32 device_id;
bool shared;
@ -199,6 +203,87 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
static struct page *its_alloc_pages_node(int node, gfp_t gfp,
unsigned int order)
{
struct page *page;
int ret = 0;
page = alloc_pages_node(node, gfp, order);
if (!page)
return NULL;
ret = set_memory_decrypted((unsigned long)page_address(page),
1 << order);
/*
* If set_memory_decrypted() fails then we don't know what state the
* page is in, so we can't free it. Instead we leak it.
* set_memory_decrypted() will already have WARNed.
*/
if (ret)
return NULL;
return page;
}
static struct page *its_alloc_pages(gfp_t gfp, unsigned int order)
{
return its_alloc_pages_node(NUMA_NO_NODE, gfp, order);
}
static void its_free_pages(void *addr, unsigned int order)
{
/*
* If the memory cannot be encrypted again then we must leak the pages.
* set_memory_encrypted() will already have WARNed.
*/
if (set_memory_encrypted((unsigned long)addr, 1 << order))
return;
free_pages((unsigned long)addr, order);
}
static struct gen_pool *itt_pool;
static void *itt_alloc_pool(int node, int size)
{
unsigned long addr;
struct page *page;
if (size >= PAGE_SIZE) {
page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, get_order(size));
return page ? page_address(page) : NULL;
}
do {
addr = gen_pool_alloc(itt_pool, size);
if (addr)
break;
page = its_alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
if (!page)
break;
gen_pool_add(itt_pool, (unsigned long)page_address(page), PAGE_SIZE, node);
} while (!addr);
return (void *)addr;
}
static void itt_free_pool(void *addr, int size)
{
if (!addr)
return;
if (size >= PAGE_SIZE) {
its_free_pages(addr, get_order(size));
return;
}
gen_pool_free(itt_pool, (unsigned long)addr, size);
}
/*
* Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
* always have vSGIs mapped.
@ -621,7 +706,6 @@ static struct its_collection *its_build_mapd_cmd(struct its_node *its,
u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
its_encode_cmd(cmd, GITS_CMD_MAPD);
its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
@ -2181,7 +2265,8 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
prop_page = its_alloc_pages(gfp_flags,
get_order(LPI_PROPBASE_SZ));
if (!prop_page)
return NULL;
@ -2192,8 +2277,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
static void its_free_prop_table(struct page *prop_page)
{
free_pages((unsigned long)page_address(prop_page),
get_order(LPI_PROPBASE_SZ));
its_free_pages(page_address(prop_page), get_order(LPI_PROPBASE_SZ));
}
static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
@ -2315,7 +2399,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
@ -2328,7 +2412,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
/* 52bit PA is supported only when PageSize=64K */
if (psz != SZ_64K) {
pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
free_pages((unsigned long)base, order);
its_free_pages(base, order);
return -ENXIO;
}
@ -2384,7 +2468,7 @@ retry_baser:
pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
val, tmp);
free_pages((unsigned long)base, order);
its_free_pages(base, order);
return -ENXIO;
}
@ -2523,8 +2607,7 @@ static void its_free_tables(struct its_node *its)
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
if (its->tables[i].base) {
free_pages((unsigned long)its->tables[i].base,
its->tables[i].order);
its_free_pages(its->tables[i].base, its->tables[i].order);
its->tables[i].base = NULL;
}
}
@ -2790,7 +2873,7 @@ static bool allocate_vpe_l2_table(int cpu, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
page = its_alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
if (!page)
return false;
@ -2909,7 +2992,7 @@ static int allocate_vpe_l1_table(void)
pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
np, npg, psz, epp, esz);
page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
page = its_alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE));
if (!page)
return -ENOMEM;
@ -2955,8 +3038,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
get_order(LPI_PENDBASE_SZ));
pend_page = its_alloc_pages(gfp_flags | __GFP_ZERO, get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@ -2968,7 +3050,7 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
its_free_pages(page_address(pt), get_order(LPI_PENDBASE_SZ));
}
/*
@ -3303,8 +3385,8 @@ static bool its_alloc_table_entry(struct its_node *its,
/* Allocate memory for 2nd level table */
if (!table[idx]) {
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(baser->psz));
page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(baser->psz));
if (!page)
return false;
@ -3399,15 +3481,18 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (WARN_ON(!is_power_of_2(nvecs)))
nvecs = roundup_pow_of_two(nvecs);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
* Even if the device wants a single LPI, the ITT must be
* sized as a power of two (and you need at least one bit...).
*/
nr_ites = max(2, nvecs);
sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
sz = max(sz, ITS_ITT_ALIGN);
itt = itt_alloc_pool(its->numa_node, sz);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@ -3419,9 +3504,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
lpi_base = 0;
}
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
kfree(itt);
itt_free_pool(itt, sz);
bitmap_free(lpi_map);
kfree(col_map);
return NULL;
@ -3431,6 +3516,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
dev->its = its;
dev->itt = itt;
dev->itt_sz = sz;
dev->nr_ites = nr_ites;
dev->event_map.lpi_map = lpi_map;
dev->event_map.col_map = col_map;
@ -3458,7 +3544,7 @@ static void its_free_device(struct its_device *its_dev)
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
kfree(its_dev->event_map.col_map);
kfree(its_dev->itt);
itt_free_pool(its_dev->itt, its_dev->itt_sz);
kfree(its_dev);
}
@ -5132,8 +5218,9 @@ static int __init its_probe_one(struct its_node *its)
}
}
page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
page = its_alloc_pages_node(its->numa_node,
GFP_KERNEL | __GFP_ZERO,
get_order(ITS_CMD_QUEUE_SZ));
if (!page) {
err = -ENOMEM;
goto out_unmap_sgir;
@ -5197,7 +5284,7 @@ static int __init its_probe_one(struct its_node *its)
out_free_tables:
its_free_tables(its);
out_free_cmd:
free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
its_free_pages(its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
@ -5683,6 +5770,10 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4_1 = false;
int err;
itt_pool = gen_pool_create(get_order(ITS_ITT_ALIGN), -1);
if (!itt_pool)
return -ENOMEM;
gic_rdists = rdists;
lpi_prop_prio = irq_prio;

View File

@ -66,6 +66,87 @@ static struct gic_all_vpes_chip_data {
bool mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
static int __gic_with_next_online_cpu(int prev)
{
unsigned int cpu;
/* Discover the next online CPU */
cpu = cpumask_next(prev, cpu_online_mask);
/* If there isn't one, we're done */
if (cpu >= nr_cpu_ids)
return cpu;
/*
* Move the access lock to the next CPU's GIC local register block.
*
* Set GIC_VL_OTHER. Since the caller holds gic_lock nothing can
* clobber the written value.
*/
write_gic_vl_other(mips_cm_vp_id(cpu));
return cpu;
}
static inline void gic_unlock_cluster(void)
{
if (mips_cps_multicluster_cpus())
mips_cm_unlock_other();
}
/**
* for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
* @cpu: An integer variable to hold the current CPU number
* @gic_lock: A pointer to raw spin lock used as a guard
*
* Iterate over online CPUs & configure the other/redirect register region to
* access each CPUs GIC local register block, which can be accessed from the
* loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or
* their derivatives.
*/
#define for_each_online_cpu_gic(cpu, gic_lock) \
guard(raw_spinlock_irqsave)(gic_lock); \
for ((cpu) = __gic_with_next_online_cpu(-1); \
(cpu) < nr_cpu_ids; \
gic_unlock_cluster(), \
(cpu) = __gic_with_next_online_cpu(cpu))
/**
* gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
* @d: struct irq_data corresponding to the interrupt we're interested in
*
* Locks redirect register block access to the global register block of the GIC
* within the remote cluster that the IRQ corresponding to @d is affine to,
* returning true when this redirect block setup & locking has been performed.
*
* If @d is affine to the local cluster then no locking is performed and this
* function will return false, indicating to the caller that it should access
* the local clusters registers without the overhead of indirection through the
* redirect block.
*
* In summary, if this function returns true then the caller should access GIC
* registers using redirect register block accessors & then call
* mips_cm_unlock_other() when done. If this function returns false then the
* caller should trivially access GIC registers in the local cluster.
*
* Returns true if locking performed, else false.
*/
static bool gic_irq_lock_cluster(struct irq_data *d)
{
unsigned int cpu, cl;
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
BUG_ON(cpu >= NR_CPUS);
cl = cpu_cluster(&cpu_data[cpu]);
if (cl == cpu_cluster(&current_cpu_data))
return false;
if (mips_cps_numcores(cl) == 0)
return false;
mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
return true;
}
static void gic_clear_pcpu_masks(unsigned int intr)
{
unsigned int i;
@ -112,7 +193,12 @@ static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
write_gic_wedge(GIC_WEDGE_RW | hwirq);
if (gic_irq_lock_cluster(d)) {
write_gic_redir_wedge(GIC_WEDGE_RW | hwirq);
mips_cm_unlock_other();
} else {
write_gic_wedge(GIC_WEDGE_RW | hwirq);
}
}
int gic_get_c0_compare_int(void)
@ -180,7 +266,13 @@ static void gic_mask_irq(struct irq_data *d)
{
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
write_gic_rmask(intr);
if (gic_irq_lock_cluster(d)) {
write_gic_redir_rmask(intr);
mips_cm_unlock_other();
} else {
write_gic_rmask(intr);
}
gic_clear_pcpu_masks(intr);
}
@ -189,7 +281,12 @@ static void gic_unmask_irq(struct irq_data *d)
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu;
write_gic_smask(intr);
if (gic_irq_lock_cluster(d)) {
write_gic_redir_smask(intr);
mips_cm_unlock_other();
} else {
write_gic_smask(intr);
}
gic_clear_pcpu_masks(intr);
cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
@ -200,7 +297,12 @@ static void gic_ack_irq(struct irq_data *d)
{
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
write_gic_wedge(irq);
if (gic_irq_lock_cluster(d)) {
write_gic_redir_wedge(irq);
mips_cm_unlock_other();
} else {
write_gic_wedge(irq);
}
}
static int gic_set_type(struct irq_data *d, unsigned int type)
@ -240,9 +342,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
break;
}
change_gic_pol(irq, pol);
change_gic_trig(irq, trig);
change_gic_dual(irq, dual);
if (gic_irq_lock_cluster(d)) {
change_gic_redir_pol(irq, pol);
change_gic_redir_trig(irq, trig);
change_gic_redir_dual(irq, dual);
mips_cm_unlock_other();
} else {
change_gic_pol(irq, pol);
change_gic_trig(irq, trig);
change_gic_dual(irq, dual);
}
if (trig == GIC_TRIG_EDGE)
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
@ -260,25 +369,72 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu, cl, old_cpu, old_cl;
unsigned long flags;
unsigned int cpu;
/*
* The GIC specifies that we can only route an interrupt to one VP(E),
* ie. CPU in Linux parlance, at a time. Therefore we always route to
* the first online CPU in the mask.
*/
cpu = cpumask_first_and(cpumask, cpu_online_mask);
if (cpu >= NR_CPUS)
return -EINVAL;
/* Assumption : cpumask refers to a single CPU */
old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
old_cl = cpu_cluster(&cpu_data[old_cpu]);
cl = cpu_cluster(&cpu_data[cpu]);
raw_spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
/* Update the pcpu_masks */
gic_clear_pcpu_masks(irq);
if (read_gic_mask(irq))
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
/*
* If we're moving affinity between clusters, stop routing the
* interrupt to any VP(E) in the old cluster.
*/
if (cl != old_cl) {
if (gic_irq_lock_cluster(d)) {
write_gic_redir_map_vp(irq, 0);
mips_cm_unlock_other();
} else {
write_gic_map_vp(irq, 0);
}
}
/*
* Update effective affinity - after this gic_irq_lock_cluster() will
* begin operating on the new cluster.
*/
irq_data_update_effective_affinity(d, cpumask_of(cpu));
/*
* If we're moving affinity between clusters, configure the interrupt
* trigger type in the new cluster.
*/
if (cl != old_cl)
gic_set_type(d, irqd_get_trigger_type(d));
/* Route the interrupt to its new VP(E) */
if (gic_irq_lock_cluster(d)) {
write_gic_redir_map_pin(irq,
GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
/* Update the pcpu_masks */
gic_clear_pcpu_masks(irq);
if (read_gic_redir_mask(irq))
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
mips_cm_unlock_other();
} else {
write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
/* Update the pcpu_masks */
gic_clear_pcpu_masks(irq);
if (read_gic_mask(irq))
set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
}
raw_spin_unlock_irqrestore(&gic_lock, flags);
return IRQ_SET_MASK_OK;
@ -350,37 +506,33 @@ static struct irq_chip gic_local_irq_controller = {
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
struct gic_all_vpes_chip_data *cd;
unsigned long flags;
int intr, cpu;
if (!mips_cps_multicluster_cpus())
return;
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
cd->mask = false;
raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_rmask(BIT(intr));
}
raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
struct gic_all_vpes_chip_data *cd;
unsigned long flags;
int intr, cpu;
if (!mips_cps_multicluster_cpus())
return;
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
cd->mask = true;
raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_smask(BIT(intr));
}
raw_spin_unlock_irqrestore(&gic_lock, flags);
}
static void gic_all_vpes_irq_cpu_online(void)
@ -436,11 +588,21 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
unsigned long flags;
data = irq_get_irq_data(virq);
irq_data_update_effective_affinity(data, cpumask_of(cpu));
raw_spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
/* Route the interrupt to its VP(E) */
if (gic_irq_lock_cluster(data)) {
write_gic_redir_map_pin(intr,
GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
mips_cm_unlock_other();
} else {
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
}
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
@ -469,7 +631,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
struct gic_all_vpes_chip_data *cd;
unsigned long flags;
unsigned int intr;
int err, cpu;
u32 map;
@ -533,12 +694,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
raw_spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
if (mips_cps_multicluster_cpus()) {
for_each_online_cpu_gic(cpu, &gic_lock)
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
raw_spin_unlock_irqrestore(&gic_lock, flags);
return 0;
}
@ -621,6 +780,9 @@ static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
if (ret)
goto error;
/* Set affinity to cpu. */
irq_data_update_effective_affinity(irq_get_irq_data(virq + i),
cpumask_of(cpu));
ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
if (ret)
goto error;
@ -734,7 +896,7 @@ static int gic_cpu_startup(unsigned int cpu)
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
unsigned int cpu_vec, i, gicconfig;
unsigned int cpu_vec, i, gicconfig, cl, nclusters;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
@ -815,11 +977,32 @@ static int __init gic_of_init(struct device_node *node,
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
/* Setup defaults */
for (i = 0; i < gic_shared_intrs; i++) {
change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_trig(i, GIC_TRIG_LEVEL);
write_gic_rmask(i);
/*
* Initialise each cluster's GIC shared registers to sane default
* values.
* Otherwise, the IPI set up will be erased if we move code
* to gic_cpu_startup for each cpu.
*/
nclusters = mips_cps_numclusters();
for (cl = 0; cl < nclusters; cl++) {
if (cl == cpu_cluster(&current_cpu_data)) {
for (i = 0; i < gic_shared_intrs; i++) {
change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_trig(i, GIC_TRIG_LEVEL);
write_gic_rmask(i);
}
} else if (mips_cps_numcores(cl) != 0) {
mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
for (i = 0; i < gic_shared_intrs; i++) {
change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_redir_trig(i, GIC_TRIG_LEVEL);
write_gic_redir_rmask(i);
}
mips_cm_unlock_other();
} else {
pr_warn("No CPU cores on the cluster %d skip it\n", cl);
}
}
return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,

View File

@ -0,0 +1,513 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/V2H(P) ICU Driver
*
* Based on irq-renesas-rzg2l.c
*
* Copyright (C) 2024 Renesas Electronics Corporation.
*
* Author: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
*/
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
/* DT "interrupts" indexes */
#define ICU_IRQ_START 1
#define ICU_IRQ_COUNT 16
#define ICU_TINT_START (ICU_IRQ_START + ICU_IRQ_COUNT)
#define ICU_TINT_COUNT 32
#define ICU_NUM_IRQ (ICU_TINT_START + ICU_TINT_COUNT)
/* Registers */
#define ICU_NSCNT 0x00
#define ICU_NSCLR 0x04
#define ICU_NITSR 0x08
#define ICU_ISCTR 0x10
#define ICU_ISCLR 0x14
#define ICU_IITSR 0x18
#define ICU_TSCTR 0x20
#define ICU_TSCLR 0x24
#define ICU_TITSR(k) (0x28 + (k) * 4)
#define ICU_TSSR(k) (0x30 + (k) * 4)
/* NMI */
#define ICU_NMI_EDGE_FALLING 0
#define ICU_NMI_EDGE_RISING 1
#define ICU_NSCLR_NCLR BIT(0)
/* IRQ */
#define ICU_IRQ_LEVEL_LOW 0
#define ICU_IRQ_EDGE_FALLING 1
#define ICU_IRQ_EDGE_RISING 2
#define ICU_IRQ_EDGE_BOTH 3
#define ICU_IITSR_IITSEL_PREP(iitsel, n) ((iitsel) << ((n) * 2))
#define ICU_IITSR_IITSEL_GET(iitsr, n) (((iitsr) >> ((n) * 2)) & 0x03)
#define ICU_IITSR_IITSEL_MASK(n) ICU_IITSR_IITSEL_PREP(0x03, n)
/* TINT */
#define ICU_TINT_EDGE_RISING 0
#define ICU_TINT_EDGE_FALLING 1
#define ICU_TINT_LEVEL_HIGH 2
#define ICU_TINT_LEVEL_LOW 3
#define ICU_TSSR_K(tint_nr) ((tint_nr) / 4)
#define ICU_TSSR_TSSEL_N(tint_nr) ((tint_nr) % 4)
#define ICU_TSSR_TSSEL_PREP(tssel, n) ((tssel) << ((n) * 8))
#define ICU_TSSR_TSSEL_MASK(n) ICU_TSSR_TSSEL_PREP(0x7F, n)
#define ICU_TSSR_TIEN(n) (BIT(7) << ((n) * 8))
#define ICU_TITSR_K(tint_nr) ((tint_nr) / 16)
#define ICU_TITSR_TITSEL_N(tint_nr) ((tint_nr) % 16)
#define ICU_TITSR_TITSEL_PREP(titsel, n) ICU_IITSR_IITSEL_PREP(titsel, n)
#define ICU_TITSR_TITSEL_MASK(n) ICU_IITSR_IITSEL_MASK(n)
#define ICU_TITSR_TITSEL_GET(titsr, n) ICU_IITSR_IITSEL_GET(titsr, n)
#define ICU_TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define ICU_TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
#define ICU_PB5_TINT 0x55
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
* @irqchip: Pointer to struct irq_chip
* @fwspec: IRQ firmware specific data
* @lock: Lock to serialize access to hardware registers
*/
struct rzv2h_icu_priv {
void __iomem *base;
const struct irq_chip *irqchip;
struct irq_fwspec fwspec[ICU_NUM_IRQ];
raw_spinlock_t lock;
};
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
{
return data->domain->host_data;
}
static void rzv2h_icu_eoi(struct irq_data *d)
{
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
unsigned int tintirq_nr;
u32 bit;
scoped_guard(raw_spinlock, &priv->lock) {
if (hw_irq >= ICU_TINT_START) {
tintirq_nr = hw_irq - ICU_TINT_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
writel_relaxed(bit, priv->base + ICU_TSCLR);
} else if (hw_irq >= ICU_IRQ_START) {
tintirq_nr = hw_irq - ICU_IRQ_START;
bit = BIT(tintirq_nr);
if (!irqd_is_level_type(d))
writel_relaxed(bit, priv->base + ICU_ISCLR);
} else {
writel_relaxed(ICU_NSCLR_NCLR, priv->base + ICU_NSCLR);
}
}
irq_chip_eoi_parent(d);
}
static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
{
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hw_irq = irqd_to_hwirq(d);
u32 tint_nr, tssel_n, k, tssr;
if (hw_irq < ICU_TINT_START)
return;
tint_nr = hw_irq - ICU_TINT_START;
k = ICU_TSSR_K(tint_nr);
tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
guard(raw_spinlock)(&priv->lock);
tssr = readl_relaxed(priv->base + ICU_TSSR(k));
if (enable)
tssr |= ICU_TSSR_TIEN(tssel_n);
else
tssr &= ~ICU_TSSR_TIEN(tssel_n);
writel_relaxed(tssr, priv->base + ICU_TSSR(k));
}
static void rzv2h_icu_irq_disable(struct irq_data *d)
{
irq_chip_disable_parent(d);
rzv2h_tint_irq_endisable(d, false);
}
static void rzv2h_icu_irq_enable(struct irq_data *d)
{
rzv2h_tint_irq_endisable(d, true);
irq_chip_enable_parent(d);
}
static int rzv2h_nmi_set_type(struct irq_data *d, unsigned int type)
{
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
u32 sense;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
sense = ICU_NMI_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_RISING:
sense = ICU_NMI_EDGE_RISING;
break;
default:
return -EINVAL;
}
writel_relaxed(sense, priv->base + ICU_NITSR);
return 0;
}
static void rzv2h_clear_irq_int(struct rzv2h_icu_priv *priv, unsigned int hwirq)
{
unsigned int irq_nr = hwirq - ICU_IRQ_START;
u32 isctr, iitsr, iitsel;
u32 bit = BIT(irq_nr);
isctr = readl_relaxed(priv->base + ICU_ISCTR);
iitsr = readl_relaxed(priv->base + ICU_IITSR);
iitsel = ICU_IITSR_IITSEL_GET(iitsr, irq_nr);
/*
* When level sensing is used, the interrupt flag gets automatically cleared when the
* interrupt signal is de-asserted by the source of the interrupt request, therefore clear
* the interrupt only for edge triggered interrupts.
*/
if ((isctr & bit) && (iitsel != ICU_IRQ_LEVEL_LOW))
writel_relaxed(bit, priv->base + ICU_ISCLR);
}
static int rzv2h_irq_set_type(struct irq_data *d, unsigned int type)
{
struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 irq_nr = hwirq - ICU_IRQ_START;
u32 iitsr, sense;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
sense = ICU_IRQ_LEVEL_LOW;
break;
case IRQ_TYPE_EDGE_FALLING:
sense = ICU_IRQ_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_RISING:
sense = ICU_IRQ_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = ICU_IRQ_EDGE_BOTH;
break;
default:
return -EINVAL;
}
guard(raw_spinlock)(&priv->lock);
iitsr = readl_relaxed(priv->base + ICU_IITSR);
iitsr &= ~ICU_IITSR_IITSEL_MASK(irq_nr);
iitsr |= ICU_IITSR_IITSEL_PREP(sense, irq_nr);
rzv2h_clear_irq_int(priv, hwirq);
writel_relaxed(iitsr, priv->base + ICU_IITSR);
return 0;
}
static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq)
{
unsigned int tint_nr = hwirq - ICU_TINT_START;
int titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
u32 tsctr, titsr, titsel;
u32 bit = BIT(tint_nr);
int k = tint_nr / 16;
tsctr = readl_relaxed(priv->base + ICU_TSCTR);
titsr = readl_relaxed(priv->base + ICU_TITSR(k));
titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);
/*
 * Writing 1 to the corresponding flag of register ICU_TSCTR only has an effect if
 * TSTATn = 1b and the interrupt is configured as rising edge or falling edge.
*/
if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
(titsel == ICU_TINT_EDGE_FALLING)))
writel_relaxed(bit, priv->base + ICU_TSCLR);
}
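/*
 * Route the selected GPIO interrupt (TINT source) to this TINT slot with TIEN
 * cleared, program the trigger type, clear any stale pending state and only
 * then re-enable the line.
 */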
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
{
u32 titsr, titsr_k, titsel_n, tien;
struct rzv2h_icu_priv *priv;
u32 tssr, tssr_k, tssel_n;
unsigned int hwirq;
u32 tint, sense;
int tint_nr;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
sense = ICU_TINT_LEVEL_LOW;
break;
case IRQ_TYPE_LEVEL_HIGH:
sense = ICU_TINT_LEVEL_HIGH;
break;
case IRQ_TYPE_EDGE_RISING:
sense = ICU_TINT_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
sense = ICU_TINT_EDGE_FALLING;
break;
default:
return -EINVAL;
}
tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
if (tint > ICU_PB5_TINT)
return -EINVAL;
priv = irq_data_to_priv(d);
hwirq = irqd_to_hwirq(d);
tint_nr = hwirq - ICU_TINT_START;
tssr_k = ICU_TSSR_K(tint_nr);
tssel_n = ICU_TSSR_TSSEL_N(tint_nr);
titsr_k = ICU_TITSR_K(tint_nr);
titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
tien = ICU_TSSR_TIEN(titsel_n);
guard(raw_spinlock)(&priv->lock);
tssr = readl_relaxed(priv->base + ICU_TSSR(tssr_k));
tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n) | tien);
tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n);
writel_relaxed(tssr, priv->base + ICU_TSSR(tssr_k));
titsr = readl_relaxed(priv->base + ICU_TITSR(titsr_k));
titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);
writel_relaxed(titsr, priv->base + ICU_TITSR(titsr_k));
rzv2h_clear_tint_int(priv, hwirq);
writel_relaxed(tssr | tien, priv->base + ICU_TSSR(tssr_k));
return 0;
}
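/*
 * Select the NMI/IRQ/TINT specific trigger configuration based on the hwirq
 * range and configure the parent interrupt as level-high.
 */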
static int rzv2h_icu_set_type(struct irq_data *d, unsigned int type)
{
unsigned int hw_irq = irqd_to_hwirq(d);
int ret;
if (hw_irq >= ICU_TINT_START)
ret = rzv2h_tint_set_type(d, type);
else if (hw_irq >= ICU_IRQ_START)
ret = rzv2h_irq_set_type(d, type);
else
ret = rzv2h_nmi_set_type(d, type);
if (ret)
return ret;
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
static const struct irq_chip rzv2h_icu_chip = {
.name = "rzv2h-icu",
.irq_eoi = rzv2h_icu_eoi,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_disable = rzv2h_icu_irq_disable,
.irq_enable = rzv2h_icu_irq_enable,
.irq_get_irqchip_state = irq_chip_get_parent_state,
.irq_set_irqchip_state = irq_chip_set_parent_state,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = rzv2h_icu_set_type,
.irq_set_affinity = irq_chip_set_affinity_parent,
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
void *arg)
{
struct rzv2h_icu_priv *priv = domain->host_data;
unsigned long tint = 0;
irq_hw_number_t hwirq;
unsigned int type;
int ret;
ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
if (ret)
return ret;
/*
* For TINT interrupts the hwirq and TINT are encoded in
* fwspec->param[0].
* hwirq is embedded in bits 0-15.
* TINT is embedded in bits 16-31.
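* I.e. param[0] = (TINT << 16) | hwirq.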
*/
if (hwirq >= ICU_TINT_START) {
tint = ICU_TINT_EXTRACT_GPIOINT(hwirq);
hwirq = ICU_TINT_EXTRACT_HWIRQ(hwirq);
if (hwirq < ICU_TINT_START)
return -EINVAL;
}
if (hwirq > (ICU_NUM_IRQ - 1))
return -EINVAL;
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
(void *)(uintptr_t)tint);
if (ret)
return ret;
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}
static const struct irq_domain_ops rzv2h_icu_domain_ops = {
.alloc = rzv2h_icu_alloc,
.free = irq_domain_free_irqs_common,
.translate = irq_domain_translate_twocell,
};
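/*
 * Cache a fwspec for each of the ICU's parent interrupts so that
 * rzv2h_icu_alloc() can pass the matching entry to the parent domain.
 */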
static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device_node *np)
{
struct of_phandle_args map;
unsigned int i;
int ret;
for (i = 0; i < ICU_NUM_IRQ; i++) {
ret = of_irq_parse_one(np, i, &map);
if (ret)
return ret;
of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]);
}
return 0;
}
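/*
 * Look up the parent interrupt domain, map the ICU registers, deassert the
 * reset line and register the hierarchical IRQ domain covering the NMI, IRQ
 * and TINT interrupts.
 */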
static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *irq_domain, *parent_domain;
struct rzv2h_icu_priv *rzv2h_icu_data;
struct platform_device *pdev;
struct reset_control *resetn;
int ret;
pdev = of_find_device_by_node(node);
if (!pdev)
return -ENODEV;
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
ret = -ENODEV;
goto put_dev;
}
rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
if (!rzv2h_icu_data) {
ret = -ENOMEM;
goto put_dev;
}
rzv2h_icu_data->irqchip = &rzv2h_icu_chip;
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
if (IS_ERR(rzv2h_icu_data->base)) {
ret = PTR_ERR(rzv2h_icu_data->base);
goto put_dev;
}
ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
if (ret) {
dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
goto put_dev;
}
resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(resetn)) {
ret = PTR_ERR(resetn);
goto put_dev;
}
ret = reset_control_deassert(resetn);
if (ret) {
dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
goto put_dev;
}
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
goto pm_disable;
}
raw_spin_lock_init(&rzv2h_icu_data->lock);
irq_domain = irq_domain_add_hierarchy(parent_domain, 0, ICU_NUM_IRQ, node,
&rzv2h_icu_domain_ops, rzv2h_icu_data);
if (!irq_domain) {
dev_err(&pdev->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto pm_put;
}
/*
* coccicheck complains about a missing put_device call before returning, but it's a false
* positive. We still need &pdev->dev after successfully returning from this function.
*/
return 0;
pm_put:
pm_runtime_put(&pdev->dev);
pm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(resetn);
put_dev:
put_device(&pdev->dev);
return ret;
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver");


@ -207,7 +207,8 @@ static int aplic_probe(struct platform_device *pdev)
else
rc = aplic_direct_setup(dev, regs);
if (rc)
- dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? "MSI" : "direct");
+ dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n",
+ msi_mode ? "MSI" : "direct");
#ifdef CONFIG_ACPI
if (!acpi_disabled)


@ -266,6 +266,9 @@ int aplic_msi_setup(struct device *dev, void __iomem *regs)
if (msi_domain)
dev_set_msi_domain(dev, msi_domain);
}
if (!dev_get_msi_domain(dev))
return -EPROBE_DEFER;
}
if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, &aplic_msi_template,


@ -252,11 +252,10 @@ static int plic_irq_suspend(void)
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
- for (i = 0; i < priv->nr_irqs; i++)
- if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
- __set_bit(i, priv->prio_save);
- else
- __clear_bit(i, priv->prio_save);
+ for (i = 0; i < priv->nr_irqs; i++) {
+ __assign_bit(i, priv->prio_save,
+ readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
+ }
for_each_cpu(cpu, cpu_present_mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);


@ -696,8 +696,7 @@ static int stm32mp_exti_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (of_property_read_bool(np, "interrupts-extended"))
- host_data->dt_has_irqs_desc = true;
+ host_data->dt_has_irqs_desc = of_property_present(np, "interrupts-extended");
return 0;
}


@ -0,0 +1,176 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
*/
#define pr_fmt(fmt) "thead-c900-aclint-sswi: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/string_choices.h>
#include <asm/sbi.h>
#include <asm/vendorid_list.h>
#define THEAD_ACLINT_xSWI_REGISTER_SIZE 4
#define THEAD_C9XX_CSR_SXSTATUS 0x5c0
#define THEAD_C9XX_SXSTATUS_CLINTEE BIT(17)
static int sswi_ipi_virq __ro_after_init;
static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs);
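/*
 * An IPI is raised by writing 1 to the target hart's SSWI register and
 * acknowledged by writing 0 to the local one.
 */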
static void thead_aclint_sswi_ipi_send(unsigned int cpu)
{
writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu));
}
static void thead_aclint_sswi_ipi_clear(void)
{
writel_relaxed(0x0, this_cpu_read(sswi_cpu_regs));
}
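/*
 * Chained handler for the parent software interrupt: clear the S-mode software
 * interrupt pending bit, acknowledge the SSWI register and demultiplex the
 * muxed IPIs.
 */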
static void thead_aclint_sswi_ipi_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
csr_clear(CSR_IP, IE_SIE);
thead_aclint_sswi_ipi_clear();
ipi_mux_process();
chained_irq_exit(chip, desc);
}
static int thead_aclint_sswi_starting_cpu(unsigned int cpu)
{
enable_percpu_irq(sswi_ipi_virq, irq_get_trigger_type(sswi_ipi_virq));
return 0;
}
static int thead_aclint_sswi_dying_cpu(unsigned int cpu)
{
thead_aclint_sswi_ipi_clear();
disable_percpu_irq(sswi_ipi_virq);
return 0;
}
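/*
 * Map each ACLINT SSWI context to its CPU via the parent hartid and record the
 * per-hart register address used to send IPIs.
 */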
static int __init thead_aclint_sswi_parse_irq(struct fwnode_handle *fwnode,
void __iomem *reg)
{
struct of_phandle_args parent;
unsigned long hartid;
u32 contexts, i;
int rc, cpu;
contexts = of_irq_count(to_of_node(fwnode));
if (!(contexts)) {
pr_err("%pfwP: no ACLINT SSWI context available\n", fwnode);
return -EINVAL;
}
for (i = 0; i < contexts; i++) {
rc = of_irq_parse_one(to_of_node(fwnode), i, &parent);
if (rc)
return rc;
rc = riscv_of_parent_hartid(parent.np, &hartid);
if (rc)
return rc;
if (parent.args[0] != RV_IRQ_SOFT)
return -ENOTSUPP;
cpu = riscv_hartid_to_cpuid(hartid);
per_cpu(sswi_cpu_regs, cpu) = reg + i * THEAD_ACLINT_xSWI_REGISTER_SIZE;
}
pr_info("%pfwP: register %u CPU%s\n", fwnode, contexts, str_plural(contexts));
return 0;
}
static int __init thead_aclint_sswi_probe(struct fwnode_handle *fwnode)
{
struct irq_domain *domain;
void __iomem *reg;
int virq, rc;
/* If it is a T-HEAD CPU, check whether SSWI is enabled */
if (riscv_cached_mvendorid(0) == THEAD_VENDOR_ID &&
!(csr_read(THEAD_C9XX_CSR_SXSTATUS) & THEAD_C9XX_SXSTATUS_CLINTEE))
return -ENOTSUPP;
if (!is_of_node(fwnode))
return -EINVAL;
reg = of_iomap(to_of_node(fwnode), 0);
if (!reg)
return -ENOMEM;
/* Parse SSWI setting */
rc = thead_aclint_sswi_parse_irq(fwnode, reg);
if (rc < 0)
return rc;
/* If multiple SSWI devices are present, do not register the irq again */
if (sswi_ipi_virq)
return 0;
/* Find riscv intc domain and create IPI irq mapping */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
if (!domain) {
pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
return -ENOENT;
}
sswi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT);
if (!sswi_ipi_virq) {
pr_err("unable to create ACLINT SSWI IRQ mapping\n");
return -ENOMEM;
}
/* Register SSWI irq and handler */
virq = ipi_mux_create(BITS_PER_BYTE, thead_aclint_sswi_ipi_send);
if (virq <= 0) {
pr_err("unable to create muxed IPIs\n");
irq_dispose_mapping(sswi_ipi_virq);
return virq < 0 ? virq : -ENOMEM;
}
irq_set_chained_handler(sswi_ipi_virq, thead_aclint_sswi_ipi_handle);
cpuhp_setup_state(CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING,
"irqchip/thead-aclint-sswi:starting",
thead_aclint_sswi_starting_cpu,
thead_aclint_sswi_dying_cpu);
riscv_ipi_set_virq_range(virq, BITS_PER_BYTE);
/* Announce that SSWI is providing IPIs */
pr_info("providing IPIs using THEAD ACLINT SSWI\n");
return 0;
}
static int __init thead_aclint_sswi_early_probe(struct device_node *node,
struct device_node *parent)
{
return thead_aclint_sswi_probe(&node->fwnode);
}
IRQCHIP_DECLARE(thead_aclint_sswi, "thead,c900-aclint-sswi", thead_aclint_sswi_early_probe);


@ -1302,7 +1302,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if (print_info)
pr_cont(", IRQ %d\n", dev->irq);
/* Tell them about an invalid IRQ. */
- if (dev->irq <= 0 || dev->irq >= nr_irqs)
+ if (dev->irq <= 0 || dev->irq >= irq_get_nr_irqs())
pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
dev->irq);


@ -373,6 +373,7 @@ static enum uart ser12_check_uart(unsigned int iobase)
static int ser12_open(struct net_device *dev)
{
const unsigned int nr_irqs = irq_get_nr_irqs();
struct baycom_state *bc = netdev_priv(dev);
enum uart u;


@ -1460,6 +1460,7 @@ scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern
static void z8530_init(void)
{
const unsigned int nr_irqs = irq_get_nr_irqs();
struct scc_channel *scc;
int chip, k;
unsigned long flags;
@ -1735,7 +1736,7 @@ static int scc_net_siocdevprivate(struct net_device *dev,
if (hwcfg.irq == 2) hwcfg.irq = 9;
- if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
+ if (hwcfg.irq < 0 || hwcfg.irq >= irq_get_nr_irqs())
return -EINVAL;
if (!Ivec[hwcfg.irq].used && hwcfg.irq)
@ -2117,6 +2118,7 @@ static int __init scc_init_driver (void)
static void __exit scc_cleanup_driver(void)
{
const unsigned int nr_irqs = irq_get_nr_irqs();
io_port ctrl;
int k;
struct scc_channel *scc;


@ -295,7 +295,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
#else
#define IRQ_MIN 9
#if defined(__PPC)
-#define IRQ_MAX (nr_irqs-1)
+#define IRQ_MAX (irq_get_nr_irqs()-1)
#else
#define IRQ_MAX 12
#endif


@ -18,6 +18,7 @@
static int intc_irq_xlate_show(struct seq_file *m, void *priv)
{
const unsigned int nr_irqs = irq_get_nr_irqs();
int i;
seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");


@ -347,6 +347,7 @@ config ARCH_R9A09G011
config ARCH_R9A09G057
bool "ARM64 Platform support for RZ/V2H(P)"
select RENESAS_RZV2H_ICU
help
This enables support for the Renesas RZ/V2H(P) SoC variants.


@ -3176,7 +3176,7 @@ static void serial8250_config_port(struct uart_port *port, int flags)
static int
serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
{
- if (ser->irq >= nr_irqs || ser->irq < 0 ||
+ if (ser->irq >= irq_get_nr_irqs() || ser->irq < 0 ||
ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
ser->type == PORT_STARTECH)


@ -499,7 +499,7 @@ static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
ret = -EINVAL;
- if (ser->irq < 0 || ser->irq >= nr_irqs)
+ if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;


@ -2202,7 +2202,7 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
ret = -EINVAL;
- if (ser->irq < 0 || ser->irq >= nr_irqs)
+ if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;


@ -631,7 +631,7 @@ static int cpm_uart_verify_port(struct uart_port *port,
if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
ret = -EINVAL;
- if (ser->irq < 0 || ser->irq >= nr_irqs)
+ if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;


@ -919,7 +919,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
if (uport->ops->verify_port)
retval = uport->ops->verify_port(uport, new_info);
- if ((new_info->irq >= nr_irqs) || (new_info->irq < 0) ||
+ if ((new_info->irq >= irq_get_nr_irqs()) || (new_info->irq < 0) ||
(new_info->baud_base < 9600))
retval = -EINVAL;


@ -1045,7 +1045,7 @@ static int qe_uart_verify_port(struct uart_port *port,
if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
return -EINVAL;
- if (ser->irq < 0 || ser->irq >= nr_irqs)
+ if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
return -EINVAL;
if (ser->baud_base < 9600)


@ -411,7 +411,7 @@ static evtchn_port_t evtchn_from_irq(unsigned int irq)
{
const struct irq_info *info = NULL;
- if (likely(irq < nr_irqs))
+ if (likely(irq < irq_get_nr_irqs()))
info = info_for_irq(irq);
if (!info)
return 0;


@ -11,13 +11,13 @@
*/
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
- return (*pos <= nr_irqs) ? pos : NULL;
+ return *pos <= irq_get_nr_irqs() ? pos : NULL;
}
static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
(*pos)++;
- if (*pos > nr_irqs)
+ if (*pos > irq_get_nr_irqs())
return NULL;
return pos;
}


@ -76,7 +76,7 @@ static void show_all_irqs(struct seq_file *p)
seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
next = i + 1;
}
- show_irq_gap(p, nr_irqs - next);
+ show_irq_gap(p, irq_get_nr_irqs() - next);
}
static int show_stat(struct seq_file *p, void *v)
@ -196,7 +196,7 @@ static int stat_open(struct inode *inode, struct file *file)
unsigned int size = 1024 + 128 * num_online_cpus();
/* minimum size to display an interrupt count : 2 bytes */
- size += 2 * nr_irqs;
+ size += 2 * irq_get_nr_irqs();
return single_open_size(file, show_stat, NULL, size);
}


@ -147,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_EIOINTC_STARTING,
CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,


@ -616,6 +616,53 @@ extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
/*
* With forced-threaded interrupts enabled a raised softirq is deferred to
* ksoftirqd unless it can be handled within the threaded interrupt. This
* affects timer_list timers and hrtimers which are explicitly marked with
* HRTIMER_MODE_SOFT.
 * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing,
 * which includes all hrtimers which are not explicitly marked HRTIMER_MODE_HARD.
 * Userspace controlled timers (like the clock_nanosleep() interface) are divided
 * into two categories: tasks with an elevated scheduling policy
 * (SCHED_{FIFO|RR|DL}) and the remaining tasks. Tasks with an elevated
 * scheduling policy are woken up directly from the HARDIRQ, while all other
 * wake ups are delayed to softirq and therefore to ksoftirqd.
 *
 * ksoftirqd runs at SCHED_OTHER policy, at which it should remain since it
 * handles the softirqs in an overloaded situation (when it did not handle
 * everything within its last run).
 * If the timers are handled at SCHED_OTHER priority then they compete with all
 * other SCHED_OTHER tasks for CPU resources and are possibly delayed.
 * Moving the timer softirqs to a low priority SCHED_FIFO thread instead ensures
 * that timers are handled before any SCHED_OTHER thread is scheduled.
*/
DECLARE_PER_CPU(struct task_struct *, ktimerd);
DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
void raise_ktimers_thread(unsigned int nr);
static inline unsigned int local_timers_pending_force_th(void)
{
return __this_cpu_read(pending_timer_softirq);
}
static inline void raise_timer_softirq(unsigned int nr)
{
lockdep_assert_in_irq();
if (force_irqthreads())
raise_ktimers_thread(nr);
else
__raise_softirq_irqoff(nr);
}
static inline unsigned int local_timers_pending(void)
{
if (force_irqthreads())
return local_timers_pending_force_th();
else
return local_softirq_pending();
}
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)


@ -5,30 +5,36 @@
#include <uapi/linux/irqnr.h>
-extern int nr_irqs;
+unsigned int irq_get_nr_irqs(void) __pure;
+unsigned int irq_set_nr_irqs(unsigned int nr);
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
- irq++, desc = irq_to_desc(irq)) \
- if (!desc) \
- ; \
- else
+#define for_each_irq_desc(irq, desc) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \
+ irq++, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
- irq--, desc = irq_to_desc(irq)) \
+ for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \
+ irq >= 0; irq--, desc = irq_to_desc(irq)) \
if (!desc) \
; \
else
-# define for_each_active_irq(irq) \
- for (irq = irq_get_next_irq(0); irq < nr_irqs; \
- irq = irq_get_next_irq(irq + 1))
+#define for_each_active_irq(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \
+ irq = irq_get_next_irq(irq + 1))
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0; irq < __nr_irqs__; irq++)
#endif


@ -141,9 +141,8 @@ void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
struct irq_devres match_data = { irq, dev_id };
- WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
+ WARN_ON(devres_release(dev, devm_irq_release, devm_irq_match,
&match_data));
free_irq(irq, dev_id);
}
EXPORT_SYMBOL(devm_free_irq);


@ -15,6 +15,7 @@
#include <linux/maple_tree.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>
#include <linux/string_choices.h>
#include "internals.h"
@ -138,8 +139,30 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
desc_smp_init(desc, node, affinity);
}
-int nr_irqs = NR_IRQS;
-EXPORT_SYMBOL_GPL(nr_irqs);
+static unsigned int nr_irqs = NR_IRQS;
/**
* irq_get_nr_irqs() - Number of interrupts supported by the system.
*/
unsigned int irq_get_nr_irqs(void)
{
return nr_irqs;
}
EXPORT_SYMBOL_GPL(irq_get_nr_irqs);
/**
* irq_set_nr_irqs() - Set the number of interrupts supported by the system.
* @nr: New number of interrupts.
*
* Return: @nr.
*/
unsigned int irq_set_nr_irqs(unsigned int nr)
{
nr_irqs = nr;
return nr;
}
EXPORT_SYMBOL_GPL(irq_set_nr_irqs);
static DEFINE_MUTEX(sparse_irq_lock);
static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs,
@ -298,8 +321,7 @@ static ssize_t wakeup_show(struct kobject *kobj,
ssize_t ret = 0;
raw_spin_lock_irq(&desc->lock);
- ret = sprintf(buf, "%s\n",
- irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
+ ret = sprintf(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data)));
raw_spin_unlock_irq(&desc->lock);
return ret;


@ -1225,7 +1225,7 @@ int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
affinity);
} else {
- hint = hwirq % nr_irqs;
+ hint = hwirq % irq_get_nr_irqs();
if (hint == 0)
hint++;
virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,


@ -457,11 +457,12 @@ int __weak arch_show_interrupts(struct seq_file *p, int prec)
}
#ifndef ACTUAL_NR_IRQS
-# define ACTUAL_NR_IRQS nr_irqs
+# define ACTUAL_NR_IRQS irq_get_nr_irqs()
#endif
int show_interrupts(struct seq_file *p, void *v)
{
const unsigned int nr_irqs = irq_get_nr_irqs();
static int prec;
int i = *(loff_t *) v, j;
@ -494,9 +495,12 @@ int show_interrupts(struct seq_file *p, void *v)
if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
goto outsparse;
- seq_printf(p, "%*d: ", prec, i);
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0);
+ seq_printf(p, "%*d:", prec, i);
+ for_each_online_cpu(j) {
+ unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0;
+ seq_put_decimal_ull_width(p, " ", cnt, 10);
+ }
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->irq_data.chip) {


@ -2476,6 +2476,14 @@ static int rcutorture_booster_init(unsigned int cpu)
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
#ifdef CONFIG_IRQ_FORCED_THREADING
if (force_irqthreads()) {
t = per_cpu(ktimerd, cpu);
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}
#endif
}
/* Don't allow time recalculation while creating a new task. */


@ -624,6 +624,24 @@ static inline void tick_irq_exit(void)
#endif
}
#ifdef CONFIG_IRQ_FORCED_THREADING
DEFINE_PER_CPU(struct task_struct *, ktimerd);
DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
static void wake_timersd(void)
{
struct task_struct *tsk = __this_cpu_read(ktimerd);
if (tsk)
wake_up_process(tsk);
}
#else
static inline void wake_timersd(void) { }
#endif
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
@ -636,6 +654,10 @@ static inline void __irq_exit_rcu(void)
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() &&
local_timers_pending_force_th() && !(in_nmi() | in_hardirq()))
wake_timersd();
tick_irq_exit();
}
@ -965,12 +987,57 @@ static struct smp_hotplug_thread softirq_threads = {
.thread_comm = "ksoftirqd/%u",
};
#ifdef CONFIG_IRQ_FORCED_THREADING
static void ktimerd_setup(unsigned int cpu)
{
/* Above SCHED_NORMAL to handle timers before regular tasks. */
sched_set_fifo_low(current);
}
static int ktimerd_should_run(unsigned int cpu)
{
return local_timers_pending_force_th();
}
void raise_ktimers_thread(unsigned int nr)
{
trace_softirq_raise(nr);
__this_cpu_or(pending_timer_softirq, BIT(nr));
}
static void run_ktimerd(unsigned int cpu)
{
unsigned int timer_si;
ksoftirqd_run_begin();
timer_si = local_timers_pending_force_th();
__this_cpu_write(pending_timer_softirq, 0);
or_softirq_pending(timer_si);
__do_softirq();
ksoftirqd_run_end();
}
static struct smp_hotplug_thread timer_thread = {
.store = &ktimerd,
.setup = ktimerd_setup,
.thread_should_run = ktimerd_should_run,
.thread_fn = run_ktimerd,
.thread_comm = "ktimers/%u",
};
#endif
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
#ifdef CONFIG_IRQ_FORCED_THREADING
if (force_irqthreads())
BUG_ON(smpboot_register_percpu_thread(&timer_thread));
#endif
return 0;
}
early_initcall(spawn_ksoftirqd);


@ -1811,7 +1811,7 @@ retry:
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ raise_timer_softirq(HRTIMER_SOFTIRQ);
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
@ -1906,7 +1906,7 @@ void hrtimer_run_queues(void)
if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base->softirq_activated = 1;
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ raise_timer_softirq(HRTIMER_SOFTIRQ);
}
__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);


@ -865,7 +865,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
static inline bool local_timer_softirq_pending(void)
{
- return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+ return local_timers_pending() & BIT(TIMER_SOFTIRQ);
}
/*


@ -2499,7 +2499,7 @@ static void run_local_timers(void)
*/
if (time_after_eq(jiffies, READ_ONCE(base->next_expiry)) ||
(i == BASE_DEF && tmigr_requires_handle_remote())) {
- raise_softirq(TIMER_SOFTIRQ);
+ raise_timer_softirq(TIMER_SOFTIRQ);
return;
}
}