pinctrl/amd: poll InterruptEnable bits in amd_gpio_irq_set_type

From the AMD BKDG, if WAKE_INT_MASTER_REG.MaskStsEn is set, a software
write to the debounce registers of *any* gpio will block wake/interrupt
status generation for *all* gpios for a length of time that depends on
WAKE_INT_MASTER_REG.MaskStsLength[11:0].  During this period the Interrupt
Delivery bit (INTERRUPT_ENABLE) will read as 0.
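
(Illustration only, not part of this change: the visible symptom is that,
after any debounce write, a read-back poll of the pin's INTERRUPT_ENABLE
bit is the only way to know the mask window has expired.  A minimal sketch
using the driver's existing helpers and defines; the helper name is
hypothetical.)

	/*
	 * Hypothetical helper, for illustration: spin until the hardware
	 * reports interrupt delivery enabled again after a debounce write.
	 * Assumes the 4-byte-per-pin register layout used throughout
	 * pinctrl-amd and that the caller holds gpio_dev->lock.
	 */
	static void amd_gpio_poll_irq_enable(struct amd_gpio *gpio_dev,
					     unsigned int hwirq)
	{
		u32 mask = BIT(INTERRUPT_ENABLE_OFF);

		while ((readl(gpio_dev->base + hwirq * 4) & mask) != mask)
			continue;
	}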

In commit 4c1de0414a ("pinctrl/amd: poll InterruptEnable bits in
enable_irq") we tried to fix this same "gpio Interrupts are blocked
immediately after writing debounce registers" problem, but incorrectly
assumed it only affected the gpio whose debounce was being configured
and not ALL gpios.

To solve this for all gpios, we move the polling loop from
amd_gpio_irq_enable() to amd_gpio_irq_set_type(), while holding the gpio
spinlock.  This ensures that another gpio operation (e.g.
amd_gpio_irq_unmask()) cannot read a temporarily disabled IRQ and
incorrectly disable it while trying to modify some other register bits.
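
(As an illustration of the race the spinlock closes, here is a sketch of
the read-modify-write pattern used by routines such as
amd_gpio_irq_unmask(); this is not code added by the patch.  Run during
the mask window, it would read the transient 0 from INTERRUPT_ENABLE and
write it back, leaving that line's interrupt disabled:)

	/* Sketch: concurrent read-modify-write of the same pin register. */
	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
	pin_reg = readl(gpio_dev->base + (d->hwirq)*4);	/* INTERRUPT_ENABLE reads as 0 */
	pin_reg |= BIT(INTERRUPT_MASK_OFF);		/* caller only meant to unmask */
	writel(pin_reg, gpio_dev->base + (d->hwirq)*4);	/* transient 0 written back */
	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);

Because the new polling in amd_gpio_irq_set_type() is done with
gpio_dev->lock held, such a sequence cannot start until INTERRUPT_ENABLE
has read back as 1.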

Fixes: 4c1de0414a ("pinctrl/amd: poll InterruptEnable bits in enable_irq")
Signed-off-by: Daniel Kurtz <djkurtz@chromium.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Authored by Daniel Kurtz on 2018-09-22 13:58:26 -06:00; committed by Linus Walleij
parent 6bf4ca7fbc
commit b85bfa246e

@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
 	unsigned long flags;
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-	u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
 
 	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
 	pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
 	pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
 	pin_reg |= BIT(INTERRUPT_MASK_OFF);
 	writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
-	/*
-	 * When debounce logic is enabled it takes ~900 us before interrupts
-	 * can be enabled. During this "debounce warm up" period the
-	 * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
-	 * reads back as 1, signaling that interrupts are now enabled.
-	 */
-	while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
-		continue;
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
 static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	int ret = 0;
-	u32 pin_reg;
+	u32 pin_reg, pin_reg_irq_en, mask;
 	unsigned long flags, irq_flags;
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 	struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 	}
 
 	pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+	/*
+	 * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+	 * debounce registers of any GPIO will block wake/interrupt status
+	 * generation for *all* GPIOs for a length of time that depends on
+	 * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
+	 * INTERRUPT_ENABLE bit will read as 0.
+	 *
+	 * We temporarily enable irq for the GPIO whose configuration is
+	 * changing, and then wait for it to read back as 1 to know when
+	 * debounce has settled and then disable the irq again.
+	 * We do this polling with the spinlock held to ensure other GPIO
+	 * access routines do not read an incorrect value for the irq enable
+	 * bit of other GPIOs. We keep the GPIO masked while polling to avoid
+	 * spurious irqs, and disable the irq again after polling.
+	 */
+	mask = BIT(INTERRUPT_ENABLE_OFF);
+	pin_reg_irq_en = pin_reg;
+	pin_reg_irq_en |= mask;
+	pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+	writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+	while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+		continue;
 	writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
 	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);