author     Mark Brown <broonie@linaro.org>  2013-10-07 23:00:24 +0100
committer  Mark Brown <broonie@linaro.org>  2013-10-07 23:04:41 +0100
commit     7e09a979404ed07b8f05d09a0e87a87c7891f472 (patch)
tree       3222b86dc2cc2b3649758633cfb17207d2a0e35c /drivers/base/regmap/regmap.c
parent     d0e639c9e06d44e713170031fe05fb60ebe680af (diff)
regmap: Cache async work structures
Rather than allocating and deallocating the structures used to manage async transfers each time we do one, keep the structures around as long as the regmap is around. This should provide a small performance improvement.

Signed-off-by: Mark Brown <broonie@linaro.org>
Diffstat (limited to 'drivers/base/regmap/regmap.c')
-rw-r--r--  drivers/base/regmap/regmap.c  |  59
1 file changed, 33 insertions(+), 26 deletions(-)
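
Before the diff itself, a minimal userspace sketch of the free-list caching pattern this commit introduces may help: a lock-protected free list is consulted before allocating a new descriptor, and completed descriptors are parked on it instead of being freed. The names (struct xfer, xfer_get(), xfer_put()) and the pthread mutex are illustrative stand-ins only, not part of the regmap code.

/*
 * Userspace sketch of the caching pattern: reuse descriptors from a
 * free list instead of malloc()/free() per transfer.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct xfer {
	struct xfer *next;	/* singly linked free list */
	char *work_buf;		/* per-transfer scratch buffer */
};

static struct xfer *free_list;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Take a cached descriptor if one is available, else allocate one. */
static struct xfer *xfer_get(size_t buf_size)
{
	struct xfer *x;

	pthread_mutex_lock(&free_lock);
	x = free_list;
	if (x)
		free_list = x->next;
	pthread_mutex_unlock(&free_lock);

	if (x)
		return x;

	x = calloc(1, sizeof(*x));
	if (!x)
		return NULL;
	x->work_buf = calloc(1, buf_size);
	if (!x->work_buf) {
		free(x);
		return NULL;
	}
	return x;
}

/* Return a descriptor to the cache instead of freeing it. */
static void xfer_put(struct xfer *x)
{
	pthread_mutex_lock(&free_lock);
	x->next = free_list;
	free_list = x;
	pthread_mutex_unlock(&free_lock);
}

int main(void)
{
	struct xfer *a = xfer_get(64);

	xfer_put(a);			/* parked, not freed */
	struct xfer *b = xfer_get(64);	/* reuses the cached descriptor */
	printf("reused: %s\n", a == b ? "yes" : "no");
	xfer_put(b);
	return 0;
}
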
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7d689a15c50..742f300ca48 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
static int _regmap_bus_raw_write(void *context, unsigned int reg,
unsigned int val);
-static void async_cleanup(struct work_struct *work)
-{
- struct regmap_async *async = container_of(work, struct regmap_async,
- cleanup);
-
- kfree(async->work_buf);
- kfree(async);
-}
-
bool regmap_reg_in_ranges(unsigned int reg,
const struct regmap_range *ranges,
unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
+ INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
if (config->read_flag_mask || config->write_flag_mask) {
@@ -942,12 +934,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
*/
void regmap_exit(struct regmap *map)
{
+ struct regmap_async *async;
+
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
if (map->bus && map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
+ while (!list_empty(&map->async_free)) {
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ list_del(&async->list);
+ kfree(async->work_buf);
+ kfree(async);
+ }
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1115,20 +1117,31 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
u8[0] |= map->write_flag_mask;
if (async && map->bus->async_write) {
- struct regmap_async *async = map->bus->async_alloc();
- if (!async)
- return -ENOMEM;
+ struct regmap_async *async;
trace_regmap_async_write_start(map->dev, reg, val_len);
- async->work_buf = kzalloc(map->format.buf_size,
- GFP_KERNEL | GFP_DMA);
- if (!async->work_buf) {
- kfree(async);
- return -ENOMEM;
+ spin_lock_irqsave(&map->async_lock, flags);
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ if (async)
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ if (!async) {
+ async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
}
- INIT_WORK(&async->cleanup, async_cleanup);
async->map = map;
/* If the caller supplied the value we can use it safely. */
@@ -1152,11 +1165,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
ret);
spin_lock_irqsave(&map->async_lock, flags);
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
spin_unlock_irqrestore(&map->async_lock, flags);
-
- kfree(async->work_buf);
- kfree(async);
}
return ret;
@@ -1820,8 +1830,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
trace_regmap_async_io_complete(map->dev);
spin_lock(&map->async_lock);
-
- list_del(&async->list);
+ list_move(&async->list, &map->async_free);
wake = list_empty(&map->async_list);
if (ret != 0)
@@ -1829,8 +1838,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
spin_unlock(&map->async_lock);
- schedule_work(&async->cleanup);
-
if (wake)
wake_up(&map->async_waitq);
}
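
For context, a rough fragment of how a driver might exercise the async write path whose bookkeeping this patch optimises. example_flush_fifo() and REG_FIFO are made-up names for illustration; regmap_raw_write_async() and regmap_async_complete() are the existing regmap calls that reach the code above.

#include <linux/regmap.h>

#define REG_FIFO 0x10	/* made-up register address for illustration */

static int example_flush_fifo(struct regmap *map, const void *buf, size_t len)
{
	int ret;

	/* Queue the write; the regmap core grabs (or, with this patch,
	 * reuses) a struct regmap_async and work_buf for the transfer. */
	ret = regmap_raw_write_async(map, REG_FIFO, buf, len);
	if (ret)
		return ret;

	/* Wait for outstanding async writes; on completion the descriptor
	 * is now moved to async_free for reuse rather than freed. */
	return regmap_async_complete(map);
}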