Commit 5cbff4b2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'regmap-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap updates from Mark Brown:
 "The main change here is a fix for an issue where we were letting the
  selector for windowed register ranges get out of sync with the
  hardware during a cache sync plus associated KUnit tests. This was
  reported just at the end of the release cycle and only in -next for a
  day prior to the merge window so it seemed better to hold off for now,
  the bug had been present for more than a decade so wasn't causing too
  many practical problems hopefully.

  There's also a fix for error handling in the debugfs output from
  Christophe Jaillet"

* tag 'regmap-v6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap:
  regmap: Ensure range selector registers are updated after cache sync
  regmap: kunit: Add test for cache sync interaction with ranges
  regmap: kunit: Fix marking of the range window as volatile
  regmap: debugfs: Fix a erroneous check after snprintf()
parents b05ddad0 6bbebcc1
...@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min, ...@@ -334,6 +334,11 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
return 0; return 0;
} }
/*
 * Comparator for rb_for_each() that treats every node as a match (always
 * "equal"), so an rb_for_each() walk using it visits every node in the
 * tree.  Used below to iterate over all registered range nodes.
 */
static int rbtree_all(const void *key, const struct rb_node *node)
{
	return 0;
}
/** /**
* regcache_sync - Sync the register cache with the hardware. * regcache_sync - Sync the register cache with the hardware.
* *
...@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map) ...@@ -351,6 +356,7 @@ int regcache_sync(struct regmap *map)
unsigned int i; unsigned int i;
const char *name; const char *name;
bool bypass; bool bypass;
struct rb_node *node;
if (WARN_ON(map->cache_type == REGCACHE_NONE)) if (WARN_ON(map->cache_type == REGCACHE_NONE))
return -EINVAL; return -EINVAL;
...@@ -392,6 +398,30 @@ int regcache_sync(struct regmap *map) ...@@ -392,6 +398,30 @@ int regcache_sync(struct regmap *map)
/* Restore the bypass state */ /* Restore the bypass state */
map->cache_bypass = bypass; map->cache_bypass = bypass;
map->no_sync_defaults = false; map->no_sync_defaults = false;
/*
* If we did any paging with cache bypassed and a cached
* paging register then the register and cache state might
* have gone out of sync, force writes of all the paging
* registers.
*/
rb_for_each(node, 0, &map->range_tree, rbtree_all) {
struct regmap_range_node *this =
rb_entry(node, struct regmap_range_node, node);
/* If there's nothing in the cache there's nothing to sync */
ret = regcache_read(map, this->selector_reg, &i);
if (ret != 0)
continue;
ret = _regmap_write(map, this->selector_reg, i);
if (ret != 0) {
dev_err(map->dev, "Failed to write %x = %x: %d\n",
this->selector_reg, i, ret);
break;
}
}
map->unlock(map->lock_arg); map->unlock(map->lock_arg);
regmap_async_complete(map); regmap_async_complete(map);
......
...@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file, ...@@ -48,7 +48,7 @@ static ssize_t regmap_name_read_file(struct file *file,
name = map->dev->driver->name; name = map->dev->driver->name;
ret = snprintf(buf, PAGE_SIZE, "%s\n", name); ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
if (ret < 0) { if (ret >= PAGE_SIZE) {
kfree(buf); kfree(buf);
return ret; return ret;
} }
......
...@@ -442,10 +442,18 @@ static struct regmap_range_cfg test_range = { ...@@ -442,10 +442,18 @@ static struct regmap_range_cfg test_range = {
.range_max = 40, .range_max = 40,
}; };
static bool test_range_volatile(struct device *dev, unsigned int reg) static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{ {
if (reg >= test_range.window_start && if (reg >= test_range.window_start &&
reg <= test_range.selector_reg + test_range.window_len) reg <= test_range.window_start + test_range.window_len)
return true;
return false;
}
static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
if (test_range_window_volatile(dev, reg))
return true; return true;
if (reg >= test_range.range_min && reg <= test_range.range_max) if (reg >= test_range.range_min && reg <= test_range.range_max)
...@@ -465,7 +473,7 @@ static void basic_ranges(struct kunit *test) ...@@ -465,7 +473,7 @@ static void basic_ranges(struct kunit *test)
config = test_regmap_config; config = test_regmap_config;
config.cache_type = t->type; config.cache_type = t->type;
config.volatile_reg = test_range_volatile; config.volatile_reg = test_range_all_volatile;
config.ranges = &test_range; config.ranges = &test_range;
config.num_ranges = 1; config.num_ranges = 1;
config.max_register = test_range.range_max; config.max_register = test_range.range_max;
...@@ -875,6 +883,59 @@ static void cache_present(struct kunit *test) ...@@ -875,6 +883,59 @@ static void cache_present(struct kunit *test)
regmap_exit(map); regmap_exit(map);
} }
/* Read back the raw selector (page) value from the backing RAM data. */
static unsigned int cache_range_window_sel(struct regmap_ram_data *data)
{
	return data->vals[test_range.selector_reg] & test_range.selector_mask;
}

/*
 * Verify that a cached window selector register stays in sync with the
 * hardware across regcache_sync(): after a sync has rewritten registers
 * behind the cache's back, a subsequent paged write must still program
 * the selector correctly.
 */
static void cache_range_window_reg(struct kunit *test)
{
	struct regcache_types *t = (struct regcache_types *)test->param_value;
	struct regmap_ram_data *data;
	struct regmap_config config;
	struct regmap *map;
	unsigned int sel;
	int r;

	config = test_regmap_config;
	config.cache_type = t->type;
	/* Only the window itself is volatile; the selector is cached */
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(&config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Touch every register in the range so the last page is selected */
	for (r = test_range.range_min; r <= test_range.range_max; r++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, r, 0));

	/* Selector should now point at page 2 (the final page of the range) */
	sel = cache_range_window_sel(data);
	KUNIT_ASSERT_EQ(test, sel, 2);

	/* Writing the first register in the range must flip back to page 0 */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	sel = cache_range_window_sel(data);
	KUNIT_ASSERT_EQ(test, sel, 0);

	/* Force a full resync of the cache to the device */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* After the sync, paging to the first register must still work */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	sel = cache_range_window_sel(data);
	KUNIT_ASSERT_EQ(test, sel, 0);

	/* And again after a second sync... */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* ...paging to the last register must select page 2 */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	sel = cache_range_window_sel(data);
	KUNIT_ASSERT_EQ(test, sel, 2);
}
struct raw_test_types { struct raw_test_types {
const char *name; const char *name;
...@@ -1217,6 +1278,7 @@ static struct kunit_case regmap_test_cases[] = { ...@@ -1217,6 +1278,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params), KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params), KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params), KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params), KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment