mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-13 14:24:11 +08:00
regmap: Ensure range selector registers are updated after cache sync
When we sync the register cache we do so with the cache bypassed in order to avoid overhead from writing the synced values back into the cache. If the regmap has ranges and the selector register for those ranges is in a register which is cached, this has the unfortunate side effect that the physical and cached copies of the selector register can be out of sync after a cache sync. The cache will have whatever the selector was when the sync started, and the hardware will have the selector for the register that was synced last. Fix this by rewriting all cached selector registers after every sync, ensuring that the hardware and cache have the same content. This will result in extra writes that wouldn't otherwise be needed, but it is simple and hopefully robust. We don't read from the hardware since not all devices have physical read support. Given that nobody noticed this until now, it is likely that we are rarely if ever hitting this case. Reported-by: Hector Martin <marcan@marcan.st> Cc: stable@vger.kernel.org Signed-off-by: Mark Brown <broonie@kernel.org> Link: https://lore.kernel.org/r/20231026-regmap-fix-selector-sync-v1-1-633ded82770d@kernel.org Signed-off-by: Mark Brown <broonie@kernel.org>
This commit is contained in:
parent
c6df843348
commit
0ec7731655
@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Comparison callback that treats every node as a match: returning 0
 * ("equal") for all nodes makes the rb_for_each() walk below visit
 * every range node in map->range_tree, so all cached selector
 * registers get rewritten after a sync.
 */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}
|
||||
|
||||
/**
|
||||
* regcache_sync - Sync the register cache with the hardware.
|
||||
*
|
||||
@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
|
||||
unsigned int i;
|
||||
const char *name;
|
||||
bool bypass;
|
||||
struct rb_node *node;
|
||||
|
||||
if (WARN_ON(map->cache_type == REGCACHE_NONE))
|
||||
return -EINVAL;
|
||||
@ -392,6 +398,30 @@ out:
|
||||
/* Restore the bypass state */
|
||||
map->cache_bypass = bypass;
|
||||
map->no_sync_defaults = false;
|
||||
|
||||
/*
|
||||
* If we did any paging with cache bypassed and a cached
|
||||
* paging register then the register and cache state might
|
||||
* have gone out of sync, force writes of all the paging
|
||||
* registers.
|
||||
*/
|
||||
rb_for_each(node, 0, &map->range_tree, rbtree_all) {
|
||||
struct regmap_range_node *this =
|
||||
rb_entry(node, struct regmap_range_node, node);
|
||||
|
||||
/* If there's nothing in the cache there's nothing to sync */
|
||||
ret = regcache_read(map, this->selector_reg, &i);
|
||||
if (ret != 0)
|
||||
continue;
|
||||
|
||||
ret = _regmap_write(map, this->selector_reg, i);
|
||||
if (ret != 0) {
|
||||
dev_err(map->dev, "Failed to write %x = %x: %d\n",
|
||||
this->selector_reg, i, ret);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
map->unlock(map->lock_arg);
|
||||
|
||||
regmap_async_complete(map);
|
||||
|
Loading…
Reference in New Issue
Block a user