soc: fsl: qbman: Use raw spinlock for cgr_lock

[ Upstream commit fbec4e7fed ]

smp_call_function always runs its callback in hard IRQ context, even on
PREEMPT_RT, where spinlocks can sleep. So we need to use a raw spinlock
for cgr_lock to ensure we aren't waiting on a sleeping task.

Although this bug has existed for a while, it was not apparent until
commit ef2a8d5478 ("net: dpaa: Adjust queue depth on rate change"), which
invokes smp_call_function_single via qman_update_cgr_safe every time a
link goes up or down.

Fixes: 96f413f476 ("soc/fsl/qbman: fix issue in qman_delete_cgr_safe()")
CC: stable@vger.kernel.org
Reported-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Closes: https://lore.kernel.org/all/20230323153935.nofnjucqjqnz34ej@skbuf/
Reported-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
Closes: https://lore.kernel.org/linux-arm-kernel/87wmsyvclu.fsf@pengutronix.de/
Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Reviewed-by: Camelia Groza <camelia.groza@nxp.com>
Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in: cd53a8ae5a (parent af25c5180b)
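As a minimal sketch of the constraint described in the commit message (not part of the patch; the example_* names are hypothetical), a lock that is also taken from an smp_call_function_single() callback must be a raw spinlock, since that callback runs in hard IRQ context even on PREEMPT_RT, where an ordinary spinlock_t can sleep:

/*
 * Minimal sketch, hypothetical example_* names: the lock is taken from
 * an IPI callback, so it must be a raw spinlock.
 */
#include <linux/smp.h>
#include <linux/spinlock.h>

struct example_state {
	raw_spinlock_t lock;		/* taken from hard IRQ context */
	int value;
};

static struct example_state example_state = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(example_state.lock),
};

/* IPI callback: runs in hard IRQ context on the target CPU. */
static void example_update(void *info)
{
	int *new_value = info;

	raw_spin_lock(&example_state.lock);
	example_state.value = *new_value;
	raw_spin_unlock(&example_state.lock);
}

/* Task context: run example_update() on @cpu and wait for completion. */
static void example_set_on_cpu(int cpu, int new_value)
{
	smp_call_function_single(cpu, example_update, &new_value, 1);
}

/* Task context: readers disable IRQs so a local IPI cannot deadlock them. */
static int example_read(void)
{
	unsigned long flags;
	int value;

	raw_spin_lock_irqsave(&example_state.lock, flags);
	value = example_state.value;
	raw_spin_unlock_irqrestore(&example_state.lock, flags);

	return value;
}

With a spinlock_t, the PREEMPT_RT lock substitution would turn the acquisition in example_update() into a sleeping rt_mutex operation, which is not allowed in hard IRQ context; a raw_spinlock_t always remains a true spinning lock, which is why the patch below converts cgr_lock.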
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -991,7 +991,7 @@ struct qman_portal {
 	/* linked-list of CSCN handlers. */
 	struct list_head cgr_cbs;
 	/* list lock */
-	spinlock_t cgr_lock;
+	raw_spinlock_t cgr_lock;
 	struct work_struct congestion_work;
 	struct work_struct mr_work;
 	char irqname[MAX_IRQNAME];
@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
 	/* if the given mask is NULL, assume all CGRs can be seen */
 	qman_cgrs_fill(&portal->cgrs[0]);
 	INIT_LIST_HEAD(&portal->cgr_cbs);
-	spin_lock_init(&portal->cgr_lock);
+	raw_spin_lock_init(&portal->cgr_lock);
 	INIT_WORK(&portal->congestion_work, qm_congestion_task);
 	INIT_WORK(&portal->mr_work, qm_mr_process_task);
 	portal->bits = 0;
@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
 	union qm_mc_result *mcr;
 	struct qman_cgr *cgr;
 
-	spin_lock_irq(&p->cgr_lock);
+	/*
+	 * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
+	 */
+	raw_spin_lock_irq(&p->cgr_lock);
 	qm_mc_start(&p->p);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
-		spin_unlock_irq(&p->cgr_lock);
+		raw_spin_unlock_irq(&p->cgr_lock);
 		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
 		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
 		return;
@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
 	list_for_each_entry(cgr, &p->cgr_cbs, node)
 		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
 			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
-	spin_unlock_irq(&p->cgr_lock);
+	raw_spin_unlock_irq(&p->cgr_lock);
 	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
 }
 
@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	preempt_enable();
 
 	cgr->chan = p->config->channel;
-	spin_lock_irq(&p->cgr_lock);
+	raw_spin_lock_irq(&p->cgr_lock);
 
 	if (opts) {
 		struct qm_mcc_initcgr local_opts = *opts;
@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
 		cgr->cb(p, cgr, 1);
 out:
-	spin_unlock_irq(&p->cgr_lock);
+	raw_spin_unlock_irq(&p->cgr_lock);
 	put_affine_portal();
 	return ret;
 }
@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 		return -EINVAL;
 
 	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
-	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
 	list_del(&cgr->node);
 	/*
 	 * If there are no other CGR objects for this CGRID in the list,
@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 	/* add back to the list */
 	list_add(&cgr->node, &p->cgr_cbs);
 release_lock:
-	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
 	put_affine_portal();
 	return ret;
 }
@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
 	if (!p)
 		return -EINVAL;
 
-	spin_lock_irqsave(&p->cgr_lock, irqflags);
+	raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
 	ret = qm_modify_cgr(cgr, 0, opts);
-	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+	raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
 	put_affine_portal();
 	return ret;
 }