ARM: OMAP: clock: fix race in disable all clocks
clk_disable_unused() is invoked when CONFIG_OMAP_RESET_CLOCKS=y. Since clk_disable_unused() is called as a late_initcall, there can be more than a few workqueues executing on secondary CPU(s). The current code does the following:

a) checks if the clk is unused
b) holds the lock
c) disables the clk
d) unlocks

Between (a) and (b) being executed on CPU0, it is possible for a driver executing on CPU1 to do a get_sync->clk_get (and increase the use_count) of the clock that is just about to be disabled by clk_disable_unused(). Instead, we ensure that the entire list traversal is protected by the lock; this also makes parent-child clock traversal, as could potentially be done by runtime operations, safe as well.

Reported-by: Todd Poynor <toddpoynor@google.com>
Signed-off-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Paul Walmsley <paul@pwsan.com>
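The window described above is a classic check-then-act race: the use count is read before the lock is taken, so a clk_enable() on another CPU can land in between. Below is a minimal userspace sketch of the pattern and of the fix's locking idea; the fake_clk struct, function names, and the pthread mutex standing in for clockfw_lock are all hypothetical illustrations, not the kernel code.

/* Sketch only: contrast a racy check-outside-the-lock with holding the
 * lock across both the check and the disable, as the patch does for the
 * whole list walk. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct fake_clk {
	int usecount;
	int enabled;
};

static pthread_mutex_t clockfw_lock = PTHREAD_MUTEX_INITIALIZER;

/* Racy (pre-patch structure): (a) check is done unlocked, so another
 * CPU can raise usecount before (b) the lock is taken. */
static void disable_if_unused_racy(struct fake_clk *ck)
{
	if (ck->usecount > 0)			/* (a) check, unlocked */
		return;
	pthread_mutex_lock(&clockfw_lock);	/* (b) lock, too late  */
	ck->enabled = 0;			/* (c) disable         */
	pthread_mutex_unlock(&clockfw_lock);	/* (d) unlock          */
}

/* Fixed structure: check and disable both happen under the lock. */
static void disable_if_unused_safe(struct fake_clk *ck)
{
	pthread_mutex_lock(&clockfw_lock);
	if (ck->usecount == 0)
		ck->enabled = 0;
	pthread_mutex_unlock(&clockfw_lock);
}

/* What a driver on the other CPU does concurrently. */
static void fake_clk_enable(struct fake_clk *ck)
{
	pthread_mutex_lock(&clockfw_lock);
	ck->usecount++;
	ck->enabled = 1;
	pthread_mutex_unlock(&clockfw_lock);
}

int main(void)
{
	struct fake_clk ck = { .usecount = 0, .enabled = 1 };

	fake_clk_enable(&ck);		/* a driver takes a reference...     */
	disable_if_unused_safe(&ck);	/* ...so the clock must stay enabled */
	printf("usecount=%d enabled=%d\n", ck.usecount, ck.enabled);
	(void)disable_if_unused_racy;	/* racy variant kept for comparison  */
	return 0;
}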
commit 0eb4fd9b3e
parent ac387330a6
@@ -441,6 +441,8 @@ static int __init clk_disable_unused(void)
 		return 0;
 
 	pr_info("clock: disabling unused clocks to save power\n");
+
+	spin_lock_irqsave(&clockfw_lock, flags);
 	list_for_each_entry(ck, &clocks, node) {
 		if (ck->ops == &clkops_null)
 			continue;
@@ -448,10 +450,9 @@ static int __init clk_disable_unused(void)
 		if (ck->usecount > 0 || !ck->enable_reg)
 			continue;
 
-		spin_lock_irqsave(&clockfw_lock, flags);
 		arch_clock->clk_disable_unused(ck);
-		spin_unlock_irqrestore(&clockfw_lock, flags);
 	}
+	spin_unlock_irqrestore(&clockfw_lock, flags);
 
 	return 0;
 }
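For reference, here is a sketch of how clk_disable_unused() reads after this patch, assembled from the two hunks above. The local declarations and the early arch_clock check are assumptions (the diff only shows that path as the "return 0;" context line), so treat this as a reconstruction, not the authoritative source.

/* Reconstruction from the hunks above; declarations and the early
 * arch_clock guard are assumed, since the diff does not show them. */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	if (!arch_clock || !arch_clock->clk_disable_unused)
		return 0;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		/* Still holding clockfw_lock here, so no other CPU can take
		 * a reference between the usecount check and the disable. */
		arch_clock->clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}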