Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fixes from Thomas Gleixner:
 "A rather large update after the kaisered maintainer finally found time
  to handle regression reports.

   - The larger part addresses a regression caused by the x86 vector
     management rework.

     The reservation-based model does not work reliably for MSI
     interrupts if they cannot be masked (yes, yet another hardware
     engineering trainwreck). The reason is that reservation mode
     assigns a dummy vector when the interrupt is allocated and switches
     to a real vector when the interrupt is requested. If the MSI entry
     cannot be masked, then the initialization might raise an interrupt
     before the interrupt is requested, which ends up as a spurious
     interrupt and causes device malfunction and worse. The fix is to
     exclude MSI interrupts which do not support masking from
     reservation mode and assign a real vector right away.

   - Extend the extra lockdep class setup for nested interrupts with a
     class for the recently added irq_desc::request_mutex, so lockdep
     can differentiate and does not emit false positive warnings.

   - A ratelimit guard for the bad irq printout, so in case a bad irq
     comes back immediately the system does not drown in dmesg spam"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/msi, x86/vector: Prevent reservation mode for non maskable MSI
  genirq/irqdomain: Rename early argument of irq_domain_activate_irq()
  x86/vector: Use IRQD_CAN_RESERVE flag
  genirq: Introduce IRQD_CAN_RESERVE flag
  genirq/msi: Handle reactivation only on success
  gpio: brcmstb: Make really use of the new lockdep class
  genirq: Guard handle_bad_irq log messages
  kernel/irq: Extend lockdep class for request mutex
commit 88fa025d30
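Before diving into the diff: the heart of the MSI fix is a three-part eligibility check, which the kernel/irq/msi.c hunk below adds as msi_check_reservation_mode(). Here is a minimal user-space model of that logic, with the kernel types reduced to stubs and the pci_msi_ignore_mask check omitted; this is a sketch to show the decision, not the kernel code itself:

    #include <stdbool.h>
    #include <stdio.h>

    /* Reduced stand-in for the kernel's struct msi_desc attributes. */
    struct msi_desc { bool is_msix; bool maskbit; };

    #define DOMAIN_BUS_PCI_MSI       1
    #define MSI_FLAG_MUST_REACTIVATE (1 << 0)

    /* Model of msi_check_reservation_mode(): reservation mode is only safe
     * when the MSI entry can be masked, so a spurious interrupt on the dummy
     * vector cannot fire before the driver has requested the interrupt. */
    static bool can_use_reservation_mode(int bus_token, unsigned int flags,
                                         const struct msi_desc *desc)
    {
            if (bus_token != DOMAIN_BUS_PCI_MSI)
                    return false;
            if (!(flags & MSI_FLAG_MUST_REACTIVATE))
                    return false;
            /* MSI-X always supports masking; plain MSI only if maskbit is set. */
            return desc->is_msix || desc->maskbit;
    }

    int main(void)
    {
            struct msi_desc unmaskable = { .is_msix = false, .maskbit = false };

            /* Non-maskable MSI is excluded from reservation mode: prints 0. */
            printf("%d\n", can_use_reservation_mode(DOMAIN_BUS_PCI_MSI,
                                                    MSI_FLAG_MUST_REACTIVATE,
                                                    &unmaskable));
            return 0;
    }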
@@ -354,6 +354,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
 }
 
 static struct lock_class_key fsl_msi_irq_class;
+static struct lock_class_key fsl_msi_irq_request_class;
 
 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
 			       int offset, int irq_index)
@@ -373,7 +374,8 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
 		dev_err(&dev->dev, "No memory for MSI cascade data\n");
 		return -ENOMEM;
 	}
-	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
+	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
+			      &fsl_msi_irq_request_class);
 	cascade_data->index = offset;
 	cascade_data->msi_data = msi;
 	cascade_data->virq = virt_msir;
@@ -44,7 +44,7 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nr_irqs);
 extern int mp_irqdomain_activate(struct irq_domain *domain,
-				 struct irq_data *irq_data, bool early);
+				 struct irq_data *irq_data, bool reserve);
 extern void mp_irqdomain_deactivate(struct irq_domain *domain,
 				    struct irq_data *irq_data);
 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
@@ -283,34 +283,34 @@ TRACE_EVENT(vector_alloc_managed,
 DECLARE_EVENT_CLASS(vector_activate,
 
 	TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
-		 bool early),
+		 bool reserve),
 
-	TP_ARGS(irq, is_managed, can_reserve, early),
+	TP_ARGS(irq, is_managed, can_reserve, reserve),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	irq		)
 		__field(	bool,		is_managed	)
 		__field(	bool,		can_reserve	)
-		__field(	bool,		early		)
+		__field(	bool,		reserve		)
 	),
 
 	TP_fast_assign(
 		__entry->irq		= irq;
 		__entry->is_managed	= is_managed;
 		__entry->can_reserve	= can_reserve;
-		__entry->early		= early;
+		__entry->reserve	= reserve;
 	),
 
-	TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d",
+	TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
 		  __entry->irq, __entry->is_managed, __entry->can_reserve,
-		  __entry->early)
+		  __entry->reserve)
 );
 
 #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)				\
 DEFINE_EVENT_FN(vector_activate, name,					\
 	TP_PROTO(unsigned int irq, bool is_managed,			\
-		 bool can_reserve, bool early),				\
-	TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL);	\
+		 bool can_reserve, bool reserve),			\
+	TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL);	\
 
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
@@ -2988,7 +2988,7 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
 }
 
 int mp_irqdomain_activate(struct irq_domain *domain,
-			  struct irq_data *irq_data, bool early)
+			  struct irq_data *irq_data, bool reserve)
 {
 	unsigned long flags;
 
@@ -184,6 +184,7 @@ static void reserve_irq_vector_locked(struct irq_data *irqd)
 	irq_matrix_reserve(vector_matrix);
 	apicd->can_reserve = true;
 	apicd->has_reserved = true;
+	irqd_set_can_reserve(irqd);
 	trace_vector_reserve(irqd->irq, 0);
 	vector_assign_managed_shutdown(irqd);
 }
@@ -368,8 +369,18 @@ static int activate_reserved(struct irq_data *irqd)
 	int ret;
 
 	ret = assign_irq_vector_any_locked(irqd);
-	if (!ret)
+	if (!ret) {
 		apicd->has_reserved = false;
+		/*
+		 * Core might have disabled reservation mode after
+		 * allocating the irq descriptor. Ideally this should
+		 * happen before allocation time, but that would require
+		 * completely convoluted ways of transporting that
+		 * information.
+		 */
+		if (!irqd_can_reserve(irqd))
+			apicd->can_reserve = false;
+	}
 	return ret;
 }
 
@@ -398,21 +409,21 @@ static int activate_managed(struct irq_data *irqd)
 }
 
 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
-			       bool early)
+			       bool reserve)
 {
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	unsigned long flags;
 	int ret = 0;
 
 	trace_vector_activate(irqd->irq, apicd->is_managed,
-			      apicd->can_reserve, early);
+			      apicd->can_reserve, reserve);
 
 	/* Nothing to do for fixed assigned vectors */
 	if (!apicd->can_reserve && !apicd->is_managed)
 		return 0;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	if (early || irqd_is_managed_and_shutdown(irqd))
+	if (reserve || irqd_is_managed_and_shutdown(irqd))
 		vector_assign_managed_shutdown(irqd);
 	else if (apicd->is_managed)
 		ret = activate_managed(irqd);
@@ -478,6 +489,7 @@ static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
 	} else {
 		/* Release the vector */
 		apicd->can_reserve = true;
+		irqd_set_can_reserve(irqd);
 		clear_irq_vector(irqd);
 		realloc = true;
 	}
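The vector.c hunks above reduce to one decision at activation time: a reserve request (or a managed interrupt that is shut down) gets the dummy shutdown vector, everything else gets a real vector. A condensed user-space model of that branch follows; the has_reserved fallthrough of the real x86_vector_activate() is folded into the default case, so treat this as a sketch of the control flow only:

    #include <stdbool.h>
    #include <stdio.h>

    enum action { DO_NOTHING, ASSIGN_SHUTDOWN_VECTOR,
                  ACTIVATE_MANAGED, ACTIVATE_RESERVED };

    /* Condensed model of the branch in x86_vector_activate() above. */
    static enum action vector_activate_action(bool can_reserve, bool is_managed,
                                              bool reserve,
                                              bool managed_and_shutdown)
    {
            /* Nothing to do for fixed assigned vectors. */
            if (!can_reserve && !is_managed)
                    return DO_NOTHING;
            if (reserve || managed_and_shutdown)
                    return ASSIGN_SHUTDOWN_VECTOR;
            if (is_managed)
                    return ACTIVATE_MANAGED;
            return ACTIVATE_RESERVED;
    }

    int main(void)
    {
            /* A non-maskable MSI now arrives with reserve == false, so it
             * gets a real vector right away: prints 3 (ACTIVATE_RESERVED). */
            printf("%d\n", vector_activate_action(true, false, false, false));
            return 0;
    }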
@@ -128,7 +128,7 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
  * on the specified blade to allow the sending of MSIs to the specified CPU.
  */
 static int uv_domain_activate(struct irq_domain *domain,
-			      struct irq_data *irq_data, bool early)
+			      struct irq_data *irq_data, bool reserve)
 {
 	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
 	return 0;
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 				 irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 	ret = irq_set_chip_data(irq, d->host_data);
 	if (ret < 0)
 		return ret;
-	irq_set_lockdep_class(irq, &gpio_lock_class);
+	irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
 	irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
 	irq_set_noprobe(irq);
 
@@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
 
 
 static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 	ret = irq_set_chip_data(irq, &bank->gc);
 	if (ret < 0)
 		return ret;
-	irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+	irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+			      &brcmstb_gpio_irq_request_class);
 	irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
 	irq_set_noprobe(irq);
 	return 0;
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
  * than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 
 		bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-		irq_set_lockdep_class(irq, &gpio_lock_class);
+		irq_set_lockdep_class(irq, &gpio_lock_class,
+				      &gpio_request_class);
 		irq_set_chip_data(irq, bank);
 		irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
 	}
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
 
 static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
 					 struct irq_data *irq_data,
-					 bool early)
+					 bool reserve)
 {
 	struct xgene_gpio_sb *priv = d->host_data;
 	u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
 
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				struct lock_class_key *key);
+				struct lock_class_key *lock_key,
+				struct lock_class_key *request_key);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void)
 }
 
 int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-			       struct lock_class_key *key)
+			       struct lock_class_key *lock_key,
+			       struct lock_class_key *request_key)
 {
 	unsigned long flags;
 	int status = 0;
@@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (status)
 		goto err_remove_from_list;
 
-	status = gpiochip_add_irqchip(chip, key);
+	status = gpiochip_add_irqchip(chip, lock_key, request_key);
 	if (status)
 		goto err_remove_chip;
 
@@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
 	 * This lock class tells lockdep that GPIO irqs are in a different
 	 * category than their parents, so it won't report false recursion.
 	 */
-	irq_set_lockdep_class(irq, chip->irq.lock_key);
+	irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
 	irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
 	/* Chips that use nested thread handlers have them marked */
 	if (chip->irq.threaded)
@@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
 /**
  * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
  * @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  */
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				struct lock_class_key *lock_key)
+				struct lock_class_key *lock_key,
+				struct lock_class_key *request_key)
 {
 	struct irq_chip *irqchip = gpiochip->irq.chip;
 	const struct irq_domain_ops *ops;
@@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
 	gpiochip->to_irq = gpiochip_to_irq;
 	gpiochip->irq.default_type = type;
 	gpiochip->irq.lock_key = lock_key;
+	gpiochip->irq.request_key = request_key;
 
 	if (gpiochip->irq.domain_ops)
 		ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
  * @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 			     irq_flow_handler_t handler,
 			     unsigned int type,
 			     bool threaded,
-			     struct lock_class_key *lock_key)
+			     struct lock_class_key *lock_key,
+			     struct lock_class_key *request_key)
 {
 	struct device_node *of_node;
 
@@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 	gpiochip->irq.default_type = type;
 	gpiochip->to_irq = gpiochip_to_irq;
 	gpiochip->irq.lock_key = lock_key;
+	gpiochip->irq.request_key = request_key;
 	gpiochip->irq.domain = irq_domain_add_simple(of_node,
 			gpiochip->ngpio, first_irq,
 			&gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
 static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				       struct lock_class_key *key)
+				       struct lock_class_key *lock_key,
+				       struct lock_class_key *request_key)
 {
 	return 0;
 }
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
 			      struct irq_cfg *cfg);
 
 static int irq_remapping_activate(struct irq_domain *domain,
-				  struct irq_data *irq_data, bool early)
+				  struct irq_data *irq_data, bool reserve)
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
 }
 
 static int intel_irq_remapping_activate(struct irq_domain *domain,
-					struct irq_data *irq_data, bool early)
+					struct irq_data *irq_data, bool reserve)
 {
 	intel_ir_reconfigure_irte(irq_data, true);
 	return 0;
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static int its_irq_domain_activate(struct irq_domain *domain,
-				   struct irq_data *d, bool early)
+				   struct irq_data *d, bool reserve)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 }
 
 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-				       struct irq_data *d, bool early)
+				       struct irq_data *d, bool reserve)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	struct its_node *its;
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
  */
 static struct lock_class_key intc_irqpin_irq_lock_class;
 
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 				      irq_hw_number_t hw)
 {
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 
 	intc_irqpin_dbg(&p->irq[hw], "map");
 	irq_set_chip_data(virq, h->host_data);
-	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+			      &intc_irqpin_irq_request_class);
 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
 	return 0;
 }
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
 };
 
 static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
 
 static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
 			   irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
 	struct arizona *data = h->host_data;
 
 	irq_set_chip_data(virq, data);
-	irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+	irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+			      &arizona_irq_request_class);
 	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
 	irq_set_noprobe(virq);
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
  */
 static struct lock_class_key pcs_lock_class;
 
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
 /*
  * REVISIT: Reads and writes could eventually use regmap or something
  * generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
-	irq_set_lockdep_class(irq, &pcs_lock_class);
+	irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
 	irq_set_noprobe(irq);
 
 	return 0;
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
 }
 
 static int stm32_gpio_domain_activate(struct irq_domain *d,
-				      struct irq_data *irq_data, bool early)
+				      struct irq_data *irq_data, bool reserve)
 {
 	struct stm32_gpio_bank *bank = d->host_data;
 	struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
@@ -66,9 +66,10 @@ struct gpio_irq_chip {
 	/**
 	 * @lock_key:
 	 *
-	 * Per GPIO IRQ chip lockdep class.
+	 * Per GPIO IRQ chip lockdep classes.
 	 */
 	struct lock_class_key *lock_key;
+	struct lock_class_key *request_key;
 
 	/**
 	 * @parent_handler:
@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 
 /* add/remove chips */
 extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-				      struct lock_class_key *lock_key);
+				      struct lock_class_key *lock_key,
+				      struct lock_class_key *request_key);
 
 /**
  * gpiochip_add_data() - register a gpio_chip
@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
  */
 #ifdef CONFIG_LOCKDEP
 #define gpiochip_add_data(chip, data) ({		\
-		static struct lock_class_key key;	\
-		gpiochip_add_data_with_key(chip, data, &key); \
+		static struct lock_class_key lock_key;	\
+		static struct lock_class_key request_key;	  \
+		gpiochip_add_data_with_key(chip, data, &lock_key, \
+					   &request_key);	  \
 	})
 #else
-#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
+#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
 #endif
 
 static inline int gpiochip_add(struct gpio_chip *chip)
@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 			     irq_flow_handler_t handler,
 			     unsigned int type,
 			     bool threaded,
-			     struct lock_class_key *lock_key);
+			     struct lock_class_key *lock_key,
+			     struct lock_class_key *request_key);
 
 #ifdef CONFIG_LOCKDEP
 
@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 					  irq_flow_handler_t handler,
 					  unsigned int type)
 {
-	static struct lock_class_key key;
+	static struct lock_class_key lock_key;
+	static struct lock_class_key request_key;
 
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, false, &key);
+					handler, type, false,
+					&lock_key, &request_key);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
 					      unsigned int type)
 {
-	static struct lock_class_key key;
+	static struct lock_class_key lock_key;
+	static struct lock_class_key request_key;
 
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, true, &key);
+					handler, type, true,
+					&lock_key, &request_key);
 }
 #else
 static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 					unsigned int type)
 {
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, false, NULL);
+					handler, type, false, NULL, NULL);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
 					      unsigned int type)
 {
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, true, NULL);
+					handler, type, true, NULL, NULL);
 }
 #endif /* CONFIG_LOCKDEP */
 
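Note the API compatibility trick in the driver.h hunks above: existing callers of gpiochip_add_data() need no source change, because the CONFIG_LOCKDEP macro statically allocates both keys behind the scenes. A hypothetical caller (my_* names are placeholders, not from the patch), identical before and after this merge:

    /* Hypothetical driver probe; gpiochip_add_data() expands to create
     * both the lock_key and the request_key when CONFIG_LOCKDEP is set. */
    static int my_gpio_probe(struct my_gpio *priv)
    {
            return gpiochip_add_data(&priv->chip, priv);
    }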
@@ -212,6 +212,7 @@ struct irq_data {
  *				  mask. Applies only to affinity managed irqs.
  * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
  * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
+ * IRQD_CAN_RESERVE		- Can use reservation mode
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -233,6 +234,7 @@
 	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
 	IRQD_SINGLE_TARGET		= (1 << 24),
 	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
+	IRQD_CAN_RESERVE		= (1 << 26),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
 }
 
+static inline void irqd_set_can_reserve(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_clr_can_reserve(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
+}
+
+static inline bool irqd_can_reserve(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -255,12 +255,15 @@ static inline bool irq_is_percpu_devid(unsigned int irq)
 }
 
 static inline void
-irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+		      struct lock_class_key *request_class)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (desc)
-		lockdep_set_class(&desc->lock, class);
+	if (desc) {
+		lockdep_set_class(&desc->lock, lock_class);
+		lockdep_set_class(&desc->request_mutex, request_class);
+	}
 }
 
 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
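Every driver-side hunk in this merge follows the same pattern against this new two-argument signature: declare one static key per class, then hand both to irq_set_lockdep_class() in the domain map callback. A kernel-style sketch of that pattern, condensed from the driver hunks above (the my_* names are placeholders, not from the patch):

    /* One static key per lockdep class, as in the driver hunks above. */
    static struct lock_class_key my_irq_lock_class;
    static struct lock_class_key my_irq_request_class;

    static int my_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                 irq_hw_number_t hwirq)
    {
            /* Classifies both desc->lock and desc->request_mutex, so lockdep
             * can tell nested interrupts apart on either lock and stops
             * emitting false positive warnings. */
            irq_set_lockdep_class(virq, &my_irq_lock_class,
                                  &my_irq_request_class);
            return 0;
    }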
@@ -113,7 +113,7 @@ struct irq_domain_ops {
 		     unsigned int nr_irqs, void *arg);
 	void (*free)(struct irq_domain *d, unsigned int virq,
 		     unsigned int nr_irqs);
-	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
+	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
 	void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
 	int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
 			 unsigned long *out_hwirq, unsigned int *out_type);
@@ -12,6 +12,11 @@
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+	if (!__ratelimit(&ratelimit))
+		return;
+
 	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
 		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
 	printk("->handle_irq(): %p, ", desc->handle_irq);
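DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5) budgets at most 5 messages per window of 5 * HZ jiffies (five seconds); __ratelimit() returns true while budget remains and false once it is exhausted, so excess messages are simply dropped. The same guard pattern, sketched at a hypothetical kernel message site:

    #include <linux/ratelimit.h>

    /* Hypothetical example of the guard added to print_irq_desc() above:
     * at most 5 prints per 5 * HZ jiffies; the rest are silently dropped. */
    static void my_noisy_report(unsigned int irq)
    {
            static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 5);

            if (!__ratelimit(&rs))
                    return;
            printk("irq %u is misbehaving\n", irq);
    }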
@@ -113,6 +113,7 @@ static const struct irq_bit_descr irqdata_states[] = {
 	BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
 	BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
 	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+	BIT_MASK_DESCR(IRQD_CAN_RESERVE),
 
 	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
 
@@ -364,10 +364,11 @@ irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
 EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
 
 /*
- * Separate lockdep class for interrupt chip which can nest irq_desc
- * lock.
+ * Separate lockdep classes for interrupt chip which can nest irq_desc
+ * lock and request mutex.
  */
 static struct lock_class_key irq_nested_lock_class;
+static struct lock_class_key irq_nested_request_class;
 
 /*
  * irq_map_generic_chip - Map a generic chip for an irq domain
@@ -409,7 +410,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
 	set_bit(idx, &gc->installed);
 
 	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
-		irq_set_lockdep_class(virq, &irq_nested_lock_class);
+		irq_set_lockdep_class(virq, &irq_nested_lock_class,
+				      &irq_nested_request_class);
 
 	if (chip->irq_calc_mask)
 		chip->irq_calc_mask(data);
@@ -479,7 +481,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
 			continue;
 
 		if (flags & IRQ_GC_INIT_NESTED_LOCK)
-			irq_set_lockdep_class(i, &irq_nested_lock_class);
+			irq_set_lockdep_class(i, &irq_nested_lock_class,
+					      &irq_nested_request_class);
 
 		if (!(flags & IRQ_GC_NO_MASK)) {
 			struct irq_data *d = irq_get_irq_data(i);
@@ -440,7 +440,7 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
 #endif /* !CONFIG_GENERIC_PENDING_IRQ */
 
 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
-static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
+static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
 {
 	irqd_set_activated(data);
 	return 0;
@@ -1693,7 +1693,7 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
 	}
 }
 
-static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
+static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
 {
 	int ret = 0;
 
@@ -1702,9 +1702,9 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
 
 		if (irqd->parent_data)
 			ret = __irq_domain_activate_irq(irqd->parent_data,
-							early);
+							reserve);
 		if (!ret && domain->ops->activate) {
-			ret = domain->ops->activate(domain, irqd, early);
+			ret = domain->ops->activate(domain, irqd, reserve);
 			/* Rollback in case of error */
 			if (ret && irqd->parent_data)
 				__irq_domain_deactivate_irq(irqd->parent_data);
@@ -1716,17 +1716,18 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *			     interrupt
- * @irq_data:	outermost irq_data associated with interrupt
+ * @irq_data:	Outermost irq_data associated with interrupt
+ * @reserve:	If set only reserve an interrupt vector instead of assigning one
  *
  * This is the second step to call domain_ops->activate to program interrupt
  * controllers, so the interrupt could actually get delivered.
  */
-int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
+int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
 {
 	int ret = 0;
 
 	if (!irqd_is_activated(irq_data))
-		ret = __irq_domain_activate_irq(irq_data, early);
+		ret = __irq_domain_activate_irq(irq_data, reserve);
 	if (!ret)
 		irqd_set_activated(irq_data);
 	return ret;
@@ -339,6 +339,40 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
 	return ret;
 }
 
+/*
+ * Carefully check whether the device can use reservation mode. If
+ * reservation mode is enabled then the early activation will assign a
+ * dummy vector to the device. If the PCI/MSI device does not support
+ * masking of the entry then this can result in spurious interrupts when
+ * the device driver is not absolutely careful. But even then a malfunction
+ * of the hardware could result in a spurious interrupt on the dummy vector
+ * and render the device unusable. If the entry can be masked then the core
+ * logic will prevent the spurious interrupt and reservation mode can be
+ * used. For now reservation mode is restricted to PCI/MSI.
+ */
+static bool msi_check_reservation_mode(struct irq_domain *domain,
+				       struct msi_domain_info *info,
+				       struct device *dev)
+{
+	struct msi_desc *desc;
+
+	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
+		return false;
+
+	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
+		return false;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
+		return false;
+
+	/*
+	 * Checking the first MSI descriptor is sufficient. MSIX supports
+	 * masking and MSI does so when the maskbit is set.
+	 */
+	desc = first_msi_entry(dev);
+	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+}
+
 /**
  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
  * @domain:	The domain to allocate from
@@ -353,9 +387,11 @@
 {
 	struct msi_domain_info *info = domain->host_data;
 	struct msi_domain_ops *ops = info->ops;
-	msi_alloc_info_t arg;
+	struct irq_data *irq_data;
 	struct msi_desc *desc;
+	msi_alloc_info_t arg;
 	int i, ret, virq;
+	bool can_reserve;
 
 	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
 	if (ret)
@@ -385,6 +421,8 @@
 	if (ops->msi_finish)
 		ops->msi_finish(&arg, 0);
 
+	can_reserve = msi_check_reservation_mode(domain, info, dev);
+
 	for_each_msi_entry(desc, dev) {
 		virq = desc->irq;
 		if (desc->nvec_used == 1)
@@ -397,15 +435,25 @@
 		 * the MSI entries before the PCI layer enables MSI in the
 		 * card. Otherwise the card latches a random msi message.
 		 */
-		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
-			struct irq_data *irq_data;
+		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+			continue;
 
-			irq_data = irq_domain_get_irq_data(domain, desc->irq);
-			ret = irq_domain_activate_irq(irq_data, true);
-			if (ret)
-				goto cleanup;
-			if (info->flags & MSI_FLAG_MUST_REACTIVATE)
-				irqd_clr_activated(irq_data);
-		}
+		irq_data = irq_domain_get_irq_data(domain, desc->irq);
+		if (!can_reserve)
+			irqd_clr_can_reserve(irq_data);
+		ret = irq_domain_activate_irq(irq_data, can_reserve);
+		if (ret)
+			goto cleanup;
 	}
+
+	/*
+	 * If these interrupts use reservation mode, clear the activated bit
+	 * so request_irq() will assign the final vector.
+	 */
+	if (can_reserve) {
+		for_each_msi_entry(desc, dev) {
+			irq_data = irq_domain_get_irq_data(domain, desc->irq);
+			irqd_clr_activated(irq_data);
+		}
+	}
 	return 0;
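Putting the kernel/irq/msi.c hunks together, the allocation path after this merge reads roughly as follows (condensed excerpt from msi_domain_alloc_irqs(); error handling, declarations, and the dev_dbg() lines are elided):

    /* Condensed from msi_domain_alloc_irqs() after this merge. */
    can_reserve = msi_check_reservation_mode(domain, info, dev);

    for_each_msi_entry(desc, dev) {
            if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
                    continue;
            irq_data = irq_domain_get_irq_data(domain, desc->irq);
            if (!can_reserve)           /* non-maskable MSI: no dummy vector */
                    irqd_clr_can_reserve(irq_data);
            ret = irq_domain_activate_irq(irq_data, can_reserve);
            if (ret)
                    goto cleanup;
    }

    /* In reservation mode, drop the activated bit again so that
     * request_irq() assigns the final, real vector later. */
    if (can_reserve) {
            for_each_msi_entry(desc, dev) {
                    irq_data = irq_domain_get_irq_data(domain, desc->irq);
                    irqd_clr_activated(irq_data);
            }
    }
    return 0;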