
regmap: introduce fast_io busses, and use a spinlock for them

Some bus types have very fast IO. For these, acquiring a mutex for every
IO operation is a significant overhead. Allow busses to indicate their IO
is fast, and enhance regmap to use a spinlock for those busses.

[Currently limited to native endian registers -- broonie]

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Authored by Stephen Warren on 2012-04-04 15:48:28 -06:00; committed by Mark Brown.
parent 26b5e74d31
commit a42678c4c8
5 changed files with 67 additions and 30 deletions
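
In short, the change turns the regmap lock into a pair of function pointers on struct regmap, chosen once in regmap_init() from the new bus->fast_io flag; every regmap code path then calls map->lock(map)/map->unlock(map) instead of calling mutex_lock()/mutex_unlock() directly. A condensed sketch of the pattern, paraphrased from the hunks below (not additional code in the commit):

	/* At init time: pick the locking primitive once, based on the bus. */
	if (bus->fast_io) {
		spin_lock_init(&map->spinlock);
		map->lock = regmap_lock_spinlock;	/* spin_lock(&map->spinlock) */
		map->unlock = regmap_unlock_spinlock;	/* spin_unlock(&map->spinlock) */
	} else {
		mutex_init(&map->mutex);
		map->lock = regmap_lock_mutex;		/* mutex_lock(&map->mutex) */
		map->unlock = regmap_unlock_mutex;	/* mutex_unlock(&map->mutex) */
	}

	/* In every I/O path: callers no longer care which lock backs the map. */
	map->lock(map);
	ret = _regmap_write(map, reg, val);
	map->unlock(map);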

drivers/base/regmap/internal.h

@@ -31,8 +31,14 @@ struct regmap_format {
 	unsigned int (*parse_val)(void *buf);
 };
 
+typedef void (*regmap_lock)(struct regmap *map);
+typedef void (*regmap_unlock)(struct regmap *map);
+
 struct regmap {
-	struct mutex lock;
+	struct mutex mutex;
+	spinlock_t spinlock;
+	regmap_lock lock;
+	regmap_unlock unlock;
 
 	struct device *dev; /* Device we do I/O on */
 	void *work_buf;     /* Scratch buffer used to format I/O */

drivers/base/regmap/regcache-rbtree.c

@@ -140,7 +140,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	int registers = 0;
 	int average;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	for (node = rb_first(&rbtree_ctx->root); node != NULL;
 	     node = rb_next(node)) {
@@ -161,7 +161,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
 	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
 		   nodes, registers, average);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return 0;
 }

drivers/base/regmap/regcache.c

@@ -264,7 +264,7 @@ int regcache_sync(struct regmap *map)
 	BUG_ON(!map->cache_ops || !map->cache_ops->sync);
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 	/* Remember the initial bypass state */
 	bypass = map->cache_bypass;
 	dev_dbg(map->dev, "Syncing %s cache\n",
@@ -296,7 +296,7 @@ out:
 	trace_regcache_sync(map->dev, name, "stop");
 	/* Restore the bypass state */
 	map->cache_bypass = bypass;
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -323,7 +323,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	BUG_ON(!map->cache_ops || !map->cache_ops->sync);
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	/* Remember the initial bypass state */
 	bypass = map->cache_bypass;
@@ -342,7 +342,7 @@ out:
 	trace_regcache_sync(map->dev, name, "stop region");
 	/* Restore the bypass state */
 	map->cache_bypass = bypass;
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -362,11 +362,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
  */
 void regcache_cache_only(struct regmap *map, bool enable)
 {
-	mutex_lock(&map->lock);
+	map->lock(map);
 	WARN_ON(map->cache_bypass && enable);
 	map->cache_only = enable;
 	trace_regmap_cache_only(map->dev, enable);
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -381,9 +381,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
  */
 void regcache_mark_dirty(struct regmap *map)
 {
-	mutex_lock(&map->lock);
+	map->lock(map);
 	map->cache_dirty = true;
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 }
 EXPORT_SYMBOL_GPL(regcache_mark_dirty);
@@ -400,11 +400,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
  */
 void regcache_cache_bypass(struct regmap *map, bool enable)
 {
-	mutex_lock(&map->lock);
+	map->lock(map);
 	WARN_ON(map->cache_only && enable);
 	map->cache_bypass = enable;
 	trace_regmap_cache_bypass(map->dev, enable);
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);

drivers/base/regmap/regmap.c

@@ -179,6 +179,26 @@ static unsigned int regmap_parse_32(void *buf)
 	return b[0];
 }
 
+static void regmap_lock_mutex(struct regmap *map)
+{
+	mutex_lock(&map->mutex);
+}
+
+static void regmap_unlock_mutex(struct regmap *map)
+{
+	mutex_unlock(&map->mutex);
+}
+
+static void regmap_lock_spinlock(struct regmap *map)
+{
+	spin_lock(&map->spinlock);
+}
+
+static void regmap_unlock_spinlock(struct regmap *map)
+{
+	spin_unlock(&map->spinlock);
+}
+
 /**
  * regmap_init(): Initialise register map
  *
@@ -208,7 +228,15 @@ struct regmap *regmap_init(struct device *dev,
 		goto err;
 	}
 
-	mutex_init(&map->lock);
+	if (bus->fast_io) {
+		spin_lock_init(&map->spinlock);
+		map->lock = regmap_lock_spinlock;
+		map->unlock = regmap_unlock_spinlock;
+	} else {
+		mutex_init(&map->mutex);
+		map->lock = regmap_lock_mutex;
+		map->unlock = regmap_unlock_mutex;
+	}
 	map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
 	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
 	map->format.pad_bytes = config->pad_bits / 8;
@@ -391,7 +419,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	regcache_exit(map);
 	regmap_debugfs_exit(map);
@@ -410,7 +438,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
 	ret = regcache_init(map, config);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -562,11 +590,11 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	ret = _regmap_write(map, reg, val);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -593,11 +621,11 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	ret = _regmap_raw_write(map, reg, val, val_len);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -627,7 +655,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 	if (!map->format.parse_val)
 		return -EINVAL;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	/* No formatting is require if val_byte is 1 */
 	if (val_bytes == 1) {
@@ -648,7 +676,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 		kfree(wval);
 
 out:
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -722,11 +750,11 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
 {
 	int ret;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	ret = _regmap_read(map, reg, val);
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -751,7 +779,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	unsigned int v;
 	int ret, i;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
 	    map->cache_type == REGCACHE_NONE) {
@@ -772,7 +800,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	}
 
 out:
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -825,7 +853,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 	int ret;
 	unsigned int tmp, orig;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	ret = _regmap_read(map, reg, &orig);
 	if (ret != 0)
@@ -842,7 +870,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 	}
 
 out:
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }
@@ -909,7 +937,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 	if (map->patch)
 		return -EBUSY;
 
-	mutex_lock(&map->lock);
+	map->lock(map);
 
 	bypass = map->cache_bypass;
@@ -937,7 +965,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
 out:
 	map->cache_bypass = bypass;
 
-	mutex_unlock(&map->lock);
+	map->unlock(map);
 
 	return ret;
 }

include/linux/regmap.h

@@ -110,6 +110,8 @@ typedef void (*regmap_hw_free_context)(void *context);
 /**
  * Description of a hardware bus for the register map infrastructure.
  *
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ *           to perform locking.
  * @write: Write operation.
  * @gather_write: Write operation with split register/value, return -ENOTSUPP
  *                if not implemented on a given device.
@@ -119,6 +121,7 @@ typedef void (*regmap_hw_free_context)(void *context);
  *           a read.
  */
 struct regmap_bus {
+	bool fast_io;
 	regmap_hw_write write;
 	regmap_hw_gather_write gather_write;
 	regmap_hw_read read;
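
As a usage illustration (hypothetical driver code, not part of this commit): a bus whose register accesses are simple memory-mapped reads and writes could advertise fast_io so that regmap_init() picks spinlock-based locking for maps on that bus. The bus name and callback bodies below are invented for the example, and the callback signatures are assumed to match the regmap_hw_write/regmap_hw_read typedefs in this era of the header.

	/* Hypothetical fast bus: register IO is cheap, so ask regmap to lock
	 * with a spinlock rather than a mutex. */
	static int example_mmio_write(void *context, const void *data,
				      size_t count)
	{
		/* ...write 'count' bytes of formatted register+value data... */
		return 0;
	}

	static int example_mmio_read(void *context, const void *reg_buf,
				     size_t reg_size, void *val_buf,
				     size_t val_size)
	{
		/* ...read 'val_size' bytes for the register in 'reg_buf'... */
		return 0;
	}

	static struct regmap_bus example_mmio_regmap_bus = {
		.fast_io = true,	/* fast IO: regmap uses a spinlock */
		.write	 = example_mmio_write,
		.read	 = example_mmio_read,
	};

A driver would pass such a bus description to regmap_init() as before; callers of the regmap read/write API are unaffected by which lock is chosen.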