Commit 4171a9aa authored by Linus Torvalds

Merge tag 'regmap-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap

Pull regmap updates from Mark Brown:
 "Another busy release for regmap with the second half of the maple tree
  register cache implementation; there are some smaller optimisations
  that could still be done, but it should now be able to replace the
  rbtree cache for most devices.

  We also had a followup from Aidan MacDonald's refactoring of some of
  the regmap-irq interfaces; the conversion is complete, so the old
  interfaces have been removed. This means that even with the new
  features for the maple tree cache we'd have a nice negative diffstat
  were it not for the addition of a bunch more KUnit coverage.

  There's one GPIO patch in here; it was a dependency for a cleanup of
  an API in the regmap-irq code for which the gpio-104-dio-48e driver
  was the only user.

  Highlights:

   - The maple tree cache can now load in default values more
     efficiently, and is capable of syncing multiple registers
     in a single write during cache sync (a usage sketch follows
     below)

   - More KUnit coverage, including some coverage for raw I/O
     and a dummy RAM backed cache to support it

   - Removal of several old interfaces in regmap-irq now all
     users have been modernised"
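
For drivers, opting in to the maple tree cache remains a one-line regmap_config
change; a minimal sketch, assuming a hypothetical device whose register
addresses and default values are purely illustrative and not taken from this
series:

  #include <linux/kernel.h>
  #include <linux/regmap.h>

  /* Illustrative defaults for an imaginary device */
  static const struct reg_default example_reg_defaults[] = {
      { .reg = 0x00, .def = 0x0001 },
      { .reg = 0x01, .def = 0x00ff },
      { .reg = 0x02, .def = 0x0000 },
  };

  static const struct regmap_config example_regmap_config = {
      .reg_bits = 8,
      .val_bits = 16,
      .max_register = 0x10,

      /*
       * Select the maple tree register cache. With this series the
       * defaults below are loaded into the tree in contiguous blocks
       * rather than one register at a time, and adjacent dirty
       * registers are synced with a single raw write where the bus
       * supports it.
       */
      .cache_type = REGCACHE_MAPLE,
      .reg_defaults = example_reg_defaults,
      .num_reg_defaults = ARRAY_SIZE(example_reg_defaults),
  };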

* tag 'regmap-v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap: (23 commits)
  regmap: Allow reads from write only registers with the flat cache
  regmap: Drop early readability check
  regmap: Check for register readability before checking cache during read
  regmap: Add test to make sure we don't sync to read only registers
  regmap: Add a test case for write only registers
  regmap: Add test that writes to write only registers are prevented
  regmap: Add debugfs file for forcing field writes
  regmap: Don't check for changes in regcache_set_val()
  regmap: maple: Implement block sync for the maple tree cache
  regmap: Provide basic KUnit coverage for the raw register I/O
  regmap: Provide a ram backed regmap with raw support
  regmap: Add missing cache_only checks
  regmap: regmap-irq: Move handle_post_irq to before pm_runtime_put
  regmap: Load register defaults in blocks rather than register by register
  regmap: mmio: Allow passing an empty config->reg_stride
  regmap-irq: Drop backward compatibility for inverted mask/unmask
  regmap-irq: Minor adjustments to .handle_mask_sync()
  regmap-irq: Remove support for not_fixed_stride
  regmap-irq: Remove type registers
  regmap-irq: Remove virtual registers
  ...
parents 1b2c92a1 d0c99ffe
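
One of the regmap-irq API changes below drops the struct regmap argument from
the handle_mask_sync() callback, so chips now reach their regmap through
irq_drv_data. A minimal sketch of the new callback shape, assuming a
hypothetical driver and an illustrative mask register address (the
gpio-104-dio-48e conversion further down is the in-tree example):

  #include <linux/regmap.h>

  /* Hypothetical driver state carried via chip->irq_drv_data */
  struct example_irq_priv {
      struct regmap *map;
  };

  static int example_handle_mask_sync(int index, unsigned int mask_buf_def,
                                      unsigned int mask_buf, void *irq_drv_data)
  {
      struct example_irq_priv *priv = irq_drv_data;

      /* 0x10 is an illustrative mask register base, not a real device map */
      return regmap_write(priv->map, 0x10 + index, mask_buf);
  }
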
@@ -8,7 +8,7 @@ obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_KUNIT) += regmap-kunit.o
 obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
-obj-$(CONFIG_REGMAP_RAM) += regmap-ram.o
+obj-$(CONFIG_REGMAP_RAM) += regmap-ram.o regmap-raw-ram.o
 obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
 obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
......
@@ -125,6 +125,9 @@ struct regmap {
     int reg_stride;
     int reg_stride_order;
 
+    /* If set, will always write field to HW. */
+    bool force_write_field;
+
     /* regcache specific members */
     const struct regcache_ops *cache_ops;
     enum regcache_type cache_type;
@@ -257,6 +260,8 @@ int regcache_sync_block(struct regmap *map, void *block,
             unsigned long *cache_present,
             unsigned int block_base, unsigned int start,
             unsigned int end);
+bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
+                 unsigned int val);
 
 static inline const void *regcache_get_val_addr(struct regmap *map,
                         const void *base,
@@ -267,7 +272,7 @@ static inline const void *regcache_get_val_addr(struct regmap *map,
 
 unsigned int regcache_get_val(struct regmap *map, const void *base,
                   unsigned int idx);
-bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
-              unsigned int val);
+void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+              unsigned int val);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val);
@@ -312,6 +317,7 @@ struct regmap_ram_data {
     unsigned int *vals;  /* Allocatd by caller */
     bool *read;
     bool *written;
+    enum regmap_endian reg_endian;
 };
 
 /*
@@ -326,5 +332,12 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
 #define regmap_init_ram(config, data)                    \
     __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
 
+struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+                     struct regmap_ram_data *data,
+                     struct lock_class_key *lock_key,
+                     const char *lock_name);
+
+#define regmap_init_raw_ram(config, data)                \
+    __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+
 #endif
@@ -186,6 +186,55 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
    return ret;
}
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
struct ma_state *mas,
unsigned int min, unsigned int max)
{
void *buf;
unsigned long r;
size_t val_bytes = map->format.val_bytes;
int ret = 0;
mas_pause(mas);
rcu_read_unlock();
/*
* Use a raw write if writing more than one register to a
* device that supports raw writes to reduce transaction
* overheads.
*/
if (max - min > 1 && regmap_can_raw_write(map)) {
buf = kmalloc(val_bytes * (max - min), map->alloc_flags);
if (!buf) {
ret = -ENOMEM;
goto out;
}
/* Render the data for a raw write */
for (r = min; r < max; r++) {
regcache_set_val(map, buf, r - min,
entry[r - mas->index]);
}
ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
false);
kfree(buf);
} else {
for (r = min; r < max; r++) {
ret = _regmap_write(map, r,
entry[r - mas->index]);
if (ret != 0)
goto out;
}
}
out:
rcu_read_lock();
return ret;
}
static int regcache_maple_sync(struct regmap *map, unsigned int min,
                               unsigned int max)
{
@@ -194,8 +243,9 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
     MA_STATE(mas, mt, min, max);
     unsigned long lmin = min;
     unsigned long lmax = max;
-    unsigned int r;
+    unsigned int r, v, sync_start;
     int ret;
+    bool sync_needed = false;
 
     map->cache_bypass = true;
@@ -203,18 +253,38 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
     mas_for_each(&mas, entry, max) {
         for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
-            mas_pause(&mas);
-            rcu_read_unlock();
-            ret = regcache_sync_val(map, r, entry[r - mas.index]);
-            if (ret != 0)
-                goto out;
-            rcu_read_lock();
+            v = entry[r - mas.index];
+
+            if (regcache_reg_needs_sync(map, r, v)) {
+                if (!sync_needed) {
+                    sync_start = r;
+                    sync_needed = true;
+                }
+                continue;
+            }
+
+            if (!sync_needed)
+                continue;
+
+            ret = regcache_maple_sync_block(map, entry, &mas,
+                                            sync_start, r);
+            if (ret != 0)
+                goto out;
+            sync_needed = false;
+        }
+
+        if (sync_needed) {
+            ret = regcache_maple_sync_block(map, entry, &mas,
+                                            sync_start, r);
+            if (ret != 0)
+                goto out;
+            sync_needed = false;
         }
     }
 
-    rcu_read_unlock();
-
 out:
+    rcu_read_unlock();
+
     map->cache_bypass = false;
 
     return ret;
@@ -242,11 +312,41 @@ static int regcache_maple_exit(struct regmap *map)
    return 0;
}
static int regcache_maple_insert_block(struct regmap *map, int first,
int last)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, first, last);
unsigned long *entry;
int i, ret;
entry = kcalloc(last - first + 1, sizeof(unsigned long), GFP_KERNEL);
if (!entry)
return -ENOMEM;
for (i = 0; i < last - first + 1; i++)
entry[i] = map->reg_defaults[first + i].def;
mas_lock(&mas);
mas_set_range(&mas, map->reg_defaults[first].reg,
map->reg_defaults[last].reg);
ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
mas_unlock(&mas);
if (ret)
kfree(entry);
return ret;
}
static int regcache_maple_init(struct regmap *map)
{
    struct maple_tree *mt;
    int i;
    int ret;
    int range_start;

    mt = kmalloc(sizeof(*mt), GFP_KERNEL);
    if (!mt)
@@ -255,14 +355,30 @@ static int regcache_maple_init(struct regmap *map)
 
     mt_init(mt);
 
-    for (i = 0; i < map->num_reg_defaults; i++) {
-        ret = regcache_maple_write(map,
-                       map->reg_defaults[i].reg,
-                       map->reg_defaults[i].def);
-        if (ret)
-            goto err;
+    if (!map->num_reg_defaults)
+        return 0;
+
+    range_start = 0;
+
+    /* Scan for ranges of contiguous registers */
+    for (i = 1; i < map->num_reg_defaults; i++) {
+        if (map->reg_defaults[i].reg !=
+            map->reg_defaults[i - 1].reg + 1) {
+            ret = regcache_maple_insert_block(map, range_start,
+                              i - 1);
+            if (ret != 0)
+                goto err;
+
+            range_start = i;
+        }
     }
 
+    /* Add the last block */
+    ret = regcache_maple_insert_block(map, range_start,
+                      map->num_reg_defaults - 1);
+    if (ret != 0)
+        goto err;
+
     return 0;
 
 err:
......
@@ -279,8 +279,8 @@ int regcache_write(struct regmap *map,
     return 0;
 }
 
-static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
-                    unsigned int val)
+bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
+                 unsigned int val)
 {
     int ret;
 
@@ -561,17 +561,14 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
 
-bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
-              unsigned int val)
+void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+              unsigned int val)
 {
-    if (regcache_get_val(map, base, idx) == val)
-        return true;
-
     /* Use device native format if possible */
     if (map->format.format_val) {
         map->format.format_val(base + (map->cache_word_size * idx),
                        val, 0);
-        return false;
+        return;
     }
 
     switch (map->cache_word_size) {
@@ -604,7 +601,6 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
     default:
         BUG();
     }
-    return false;
 }
 
 unsigned int regcache_get_val(struct regmap *map, const void *base,
......
@@ -636,6 +636,17 @@ void regmap_debugfs_init(struct regmap *map)
                    &regmap_cache_bypass_fops);
    }
/*
* This could interfere with driver operation. Therefore, don't provide
* any real compile time configuration option for this feature. One will
* have to modify the source code directly in order to use it.
*/
#undef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
#ifdef REGMAP_ALLOW_FORCE_WRITE_FIELD_DEBUGFS
debugfs_create_bool("force_write_field", 0600, map->debugfs,
&map->force_write_field);
#endif
    next = rb_first(&map->range_tree);
    while (next) {
        range_node = rb_entry(next, struct regmap_range_node, node);
......
@@ -30,9 +30,6 @@ struct regmap_irq_chip_data {
     int irq;
     int wake_count;
 
-    unsigned int mask_base;
-    unsigned int unmask_base;
-
     void *status_reg_buf;
     unsigned int *main_status_buf;
     unsigned int *status_buf;
@@ -41,7 +38,6 @@ struct regmap_irq_chip_data {
     unsigned int *wake_buf;
     unsigned int *type_buf;
     unsigned int *type_buf_def;
-    unsigned int **virt_buf;
     unsigned int **config_buf;
 
     unsigned int irq_reg_stride;
@@ -114,25 +110,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
      * suppress pointless writes.
      */
     for (i = 0; i < d->chip->num_regs; i++) {
-        if (d->mask_base) {
-            if (d->chip->handle_mask_sync)
-                d->chip->handle_mask_sync(d->map, i,
-                              d->mask_buf_def[i],
-                              d->mask_buf[i],
-                              d->chip->irq_drv_data);
-            else {
-                reg = d->get_irq_reg(d, d->mask_base, i);
-                ret = regmap_update_bits(d->map, reg,
-                             d->mask_buf_def[i],
-                             d->mask_buf[i]);
-                if (ret)
-                    dev_err(d->map->dev, "Failed to sync masks in %x\n",
-                        reg);
-            }
+        if (d->chip->handle_mask_sync)
+            d->chip->handle_mask_sync(i, d->mask_buf_def[i],
+                          d->mask_buf[i],
+                          d->chip->irq_drv_data);
+
+        if (d->chip->mask_base && !d->chip->handle_mask_sync) {
+            reg = d->get_irq_reg(d, d->chip->mask_base, i);
+            ret = regmap_update_bits(d->map, reg,
+                    d->mask_buf_def[i], d->mask_buf[i]);
+            if (ret)
+                dev_err(d->map->dev, "Failed to sync masks in %x\n", reg);
         }
 
-        if (d->unmask_base) {
-            reg = d->get_irq_reg(d, d->unmask_base, i);
+        if (d->chip->unmask_base && !d->chip->handle_mask_sync) {
+            reg = d->get_irq_reg(d, d->chip->unmask_base, i);
             ret = regmap_update_bits(d->map, reg,
                 d->mask_buf_def[i], ~d->mask_buf[i]);
             if (ret)
...@@ -183,34 +176,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ...@@ -183,34 +176,6 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
} }
} }
/* Don't update the type bits if we're using mask bits for irq type. */
if (!d->chip->type_in_mask) {
for (i = 0; i < d->chip->num_type_reg; i++) {
if (!d->type_buf_def[i])
continue;
reg = d->get_irq_reg(d, d->chip->type_base, i);
ret = regmap_update_bits(d->map, reg,
d->type_buf_def[i], d->type_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to sync type in %x\n",
reg);
}
}
if (d->chip->num_virt_regs) {
for (i = 0; i < d->chip->num_virt_regs; i++) {
for (j = 0; j < d->chip->num_regs; j++) {
reg = d->get_irq_reg(d, d->chip->virt_reg_base[i],
j);
ret = regmap_write(map, reg, d->virt_buf[i][j]);
if (ret != 0)
dev_err(d->map->dev,
"Failed to write virt 0x%x: %d\n",
reg, ret);
}
}
}
for (i = 0; i < d->chip->num_config_bases; i++) { for (i = 0; i < d->chip->num_config_bases; i++) {
for (j = 0; j < d->chip->num_config_regs; j++) { for (j = 0; j < d->chip->num_config_regs; j++) {
reg = d->get_irq_reg(d, d->chip->config_base[i], j); reg = d->get_irq_reg(d, d->chip->config_base[i], j);
...@@ -289,41 +254,9 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type) ...@@ -289,41 +254,9 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
reg = t->type_reg_offset / map->reg_stride; reg = t->type_reg_offset / map->reg_stride;
if (t->type_reg_mask) if (d->chip->type_in_mask) {
d->type_buf[reg] &= ~t->type_reg_mask; ret = regmap_irq_set_type_config_simple(&d->type_buf, type,
else irq_data, reg, d->chip->irq_drv_data);
d->type_buf[reg] &= ~(t->type_falling_val |
t->type_rising_val |
t->type_level_low_val |
t->type_level_high_val);
switch (type) {
case IRQ_TYPE_EDGE_FALLING:
d->type_buf[reg] |= t->type_falling_val;
break;
case IRQ_TYPE_EDGE_RISING:
d->type_buf[reg] |= t->type_rising_val;
break;
case IRQ_TYPE_EDGE_BOTH:
d->type_buf[reg] |= (t->type_falling_val |
t->type_rising_val);
break;
case IRQ_TYPE_LEVEL_HIGH:
d->type_buf[reg] |= t->type_level_high_val;
break;
case IRQ_TYPE_LEVEL_LOW:
d->type_buf[reg] |= t->type_level_low_val;
break;
default:
return -EINVAL;
}
if (d->chip->set_type_virt) {
ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
reg);
if (ret) if (ret)
return ret; return ret;
} }
@@ -390,15 +323,8 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
         unsigned int offset = subreg->offset[i];
         unsigned int index = offset / map->reg_stride;
 
-        if (chip->not_fixed_stride)
-            ret = regmap_read(map,
-                      chip->status_base + offset,
-                      &data->status_buf[b]);
-        else
-            ret = regmap_read(map,
-                      chip->status_base + offset,
-                      &data->status_buf[index]);
-
+        ret = regmap_read(map, chip->status_base + offset,
+                  &data->status_buf[index]);
         if (ret)
             break;
     }
@@ -453,17 +379,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
          * sake of simplicity. and add bulk reads only if needed
          */
         for (i = 0; i < chip->num_main_regs; i++) {
-            /*
-             * For not_fixed_stride, don't use ->get_irq_reg().
-             * It would produce an incorrect result.
-             */
-            if (data->chip->not_fixed_stride)
-                reg = chip->main_status +
-                    i * map->reg_stride * data->irq_reg_stride;
-            else
-                reg = data->get_irq_reg(data,
-                            chip->main_status, i);
-
+            reg = data->get_irq_reg(data, chip->main_status, i);
             ret = regmap_read(map, reg, &data->main_status_buf[i]);
             if (ret) {
                 dev_err(map->dev,
@@ -586,12 +502,12 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
     }
 
 exit:
-    if (chip->runtime_pm)
-        pm_runtime_put(map->dev);
-
     if (chip->handle_post_irq)
         chip->handle_post_irq(chip->irq_drv_data);
 
+    if (chip->runtime_pm)
+        pm_runtime_put(map->dev);
+
     if (handled)
         return IRQ_HANDLED;
     else
...@@ -629,20 +545,8 @@ static const struct irq_domain_ops regmap_domain_ops = { ...@@ -629,20 +545,8 @@ static const struct irq_domain_ops regmap_domain_ops = {
unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data, unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
unsigned int base, int index) unsigned int base, int index)
{ {
const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map; struct regmap *map = data->map;
/*
* FIXME: This is for backward compatibility and should be removed
* when not_fixed_stride is dropped (it's only used by qcom-pm8008).
*/
if (chip->not_fixed_stride && chip->sub_reg_offsets) {
struct regmap_irq_sub_irq_map *subreg;
subreg = &chip->sub_reg_offsets[0];
return base + subreg->offset[0];
}
return base + index * map->reg_stride * data->irq_reg_stride; return base + index * map->reg_stride * data->irq_reg_stride;
} }
EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear); EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
...@@ -730,8 +634,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -730,8 +634,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
struct regmap_irq_chip_data *d; struct regmap_irq_chip_data *d;
int i; int i;
int ret = -ENOMEM; int ret = -ENOMEM;
int num_type_reg;
int num_regs;
u32 reg; u32 reg;
if (chip->num_regs <= 0) if (chip->num_regs <= 0)
...@@ -740,6 +642,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -740,6 +642,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack)) if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
return -EINVAL; return -EINVAL;
if (chip->mask_base && chip->unmask_base && !chip->mask_unmask_non_inverted)
return -EINVAL;
for (i = 0; i < chip->num_irqs; i++) { for (i = 0; i < chip->num_irqs; i++) {
if (chip->irqs[i].reg_offset % map->reg_stride) if (chip->irqs[i].reg_offset % map->reg_stride)
return -EINVAL; return -EINVAL;
...@@ -748,20 +653,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -748,20 +653,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
return -EINVAL; return -EINVAL;
} }
if (chip->not_fixed_stride) {
dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");
for (i = 0; i < chip->num_regs; i++)
if (chip->sub_reg_offsets[i].num_regs != 1)
return -EINVAL;
}
if (chip->num_type_reg)
dev_warn(map->dev, "type registers are deprecated; use config registers instead");
if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt)
dev_warn(map->dev, "virtual registers are deprecated; use config registers instead");
if (irq_base) { if (irq_base) {
irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
if (irq_base < 0) { if (irq_base < 0) {
...@@ -806,43 +697,17 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -806,43 +697,17 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
goto err_alloc; goto err_alloc;
} }
/* if (chip->type_in_mask) {
* Use num_config_regs if defined, otherwise fall back to num_type_reg d->type_buf_def = kcalloc(chip->num_regs,
* to maintain backward compatibility.
*/
num_type_reg = chip->num_config_regs ? chip->num_config_regs
: chip->num_type_reg;
num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg;
if (num_regs) {
d->type_buf_def = kcalloc(num_regs,
sizeof(*d->type_buf_def), GFP_KERNEL); sizeof(*d->type_buf_def), GFP_KERNEL);
if (!d->type_buf_def) if (!d->type_buf_def)
goto err_alloc; goto err_alloc;
d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf), d->type_buf = kcalloc(chip->num_regs, sizeof(*d->type_buf), GFP_KERNEL);
GFP_KERNEL);
if (!d->type_buf) if (!d->type_buf)
goto err_alloc; goto err_alloc;
} }
if (chip->num_virt_regs) {
/*
* Create virt_buf[chip->num_extra_config_regs][chip->num_regs]
*/
d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
GFP_KERNEL);
if (!d->virt_buf)
goto err_alloc;
for (i = 0; i < chip->num_virt_regs; i++) {
d->virt_buf[i] = kcalloc(chip->num_regs,
sizeof(**d->virt_buf),
GFP_KERNEL);
if (!d->virt_buf[i])
goto err_alloc;
}
}
if (chip->num_config_bases && chip->num_config_regs) { if (chip->num_config_bases && chip->num_config_regs) {
/* /*
* Create config_buf[num_config_bases][num_config_regs] * Create config_buf[num_config_bases][num_config_regs]
...@@ -868,28 +733,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -868,28 +733,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->chip = chip; d->chip = chip;
d->irq_base = irq_base; d->irq_base = irq_base;
if (chip->mask_base && chip->unmask_base &&
!chip->mask_unmask_non_inverted) {
/*
* Chips that specify both mask_base and unmask_base used to
* get inverted mask behavior by default, with no way to ask
* for the normal, non-inverted behavior. This "inverted by
* default" behavior is deprecated, but we have to support it
* until existing drivers have been fixed.
*
* Existing drivers should be updated by swapping mask_base
* and unmask_base and setting mask_unmask_non_inverted=true.
* New drivers should always set the flag.
*/
dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
d->mask_base = chip->unmask_base;
d->unmask_base = chip->mask_base;
} else {
d->mask_base = chip->mask_base;
d->unmask_base = chip->unmask_base;
}
if (chip->irq_reg_stride) if (chip->irq_reg_stride)
d->irq_reg_stride = chip->irq_reg_stride; d->irq_reg_stride = chip->irq_reg_stride;
else else
...@@ -918,29 +761,28 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -918,29 +761,28 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
for (i = 0; i < chip->num_regs; i++) { for (i = 0; i < chip->num_regs; i++) {
d->mask_buf[i] = d->mask_buf_def[i]; d->mask_buf[i] = d->mask_buf_def[i];
if (d->mask_base) { if (chip->handle_mask_sync) {
if (chip->handle_mask_sync) { ret = chip->handle_mask_sync(i, d->mask_buf_def[i],
ret = chip->handle_mask_sync(d->map, i, d->mask_buf[i],
d->mask_buf_def[i], chip->irq_drv_data);
d->mask_buf[i], if (ret)
chip->irq_drv_data); goto err_alloc;
if (ret) }
goto err_alloc;
} else { if (chip->mask_base && !chip->handle_mask_sync) {
reg = d->get_irq_reg(d, d->mask_base, i); reg = d->get_irq_reg(d, chip->mask_base, i);
ret = regmap_update_bits(d->map, reg, ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], d->mask_buf_def[i],
d->mask_buf[i]); d->mask_buf[i]);
if (ret) { if (ret) {
dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
reg, ret); reg, ret);
goto err_alloc; goto err_alloc;
}
} }
} }
if (d->unmask_base) { if (chip->unmask_base && !chip->handle_mask_sync) {
reg = d->get_irq_reg(d, d->unmask_base, i); reg = d->get_irq_reg(d, chip->unmask_base, i);
ret = regmap_update_bits(d->map, reg, ret = regmap_update_bits(d->map, reg,
d->mask_buf_def[i], ~d->mask_buf[i]); d->mask_buf_def[i], ~d->mask_buf[i]);
if (ret) { if (ret) {
...@@ -1014,20 +856,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -1014,20 +856,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
} }
} }
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
reg = d->get_irq_reg(d, d->chip->type_base, i);
ret = regmap_read(map, reg, &d->type_buf_def[i]);
if (ret) {
dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
reg, ret);
goto err_alloc;
}
}
}
if (irq_base) if (irq_base)
d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs, d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
irq_base, 0, irq_base, 0,
...@@ -1064,11 +892,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, ...@@ -1064,11 +892,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
kfree(d->mask_buf); kfree(d->mask_buf);
kfree(d->status_buf); kfree(d->status_buf);
kfree(d->status_reg_buf); kfree(d->status_reg_buf);
if (d->virt_buf) {
for (i = 0; i < chip->num_virt_regs; i++)
kfree(d->virt_buf[i]);
kfree(d->virt_buf);
}
if (d->config_buf) { if (d->config_buf) {
for (i = 0; i < chip->num_config_bases; i++) for (i = 0; i < chip->num_config_bases; i++)
kfree(d->config_buf[i]); kfree(d->config_buf[i]);
......
...@@ -92,6 +92,11 @@ static struct regmap *gen_regmap(struct regmap_config *config, ...@@ -92,6 +92,11 @@ static struct regmap *gen_regmap(struct regmap_config *config,
return ret; return ret;
} }
static bool reg_5_false(struct device *context, unsigned int reg)
{
return reg != 5;
}
static void basic_read_write(struct kunit *test) static void basic_read_write(struct kunit *test)
{ {
struct regcache_types *t = (struct regcache_types *)test->param_value; struct regcache_types *t = (struct regcache_types *)test->param_value;
...@@ -191,6 +196,81 @@ static void bulk_read(struct kunit *test) ...@@ -191,6 +196,81 @@ static void bulk_read(struct kunit *test)
regmap_exit(map); regmap_exit(map);
} }
static void write_readonly(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
config.writeable_reg = reg_5_false;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
/* Change the value of all registers, readonly should fail */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
regmap_exit(map);
}
static void read_writeonly(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.readable_reg = reg_5_false;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->read[i] = false;
/*
* Try to read all the registers, the writeonly one should
* fail if we aren't using the flat cache.
*/
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
if (t->type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
}
}
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
regmap_exit(map);
}
static void reg_defaults(struct kunit *test) static void reg_defaults(struct kunit *test)
{ {
struct regcache_types *t = (struct regcache_types *)test->param_value; struct regcache_types *t = (struct regcache_types *)test->param_value;
...@@ -609,6 +689,47 @@ static void cache_sync_defaults(struct kunit *test) ...@@ -609,6 +689,47 @@ static void cache_sync_defaults(struct kunit *test)
regmap_exit(map); regmap_exit(map);
} }
static void cache_sync_readonly(struct kunit *test)
{
struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val;
int i;
config = test_regmap_config;
config.cache_type = t->type;
config.writeable_reg = reg_5_false;
map = gen_regmap(&config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read all registers to fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
regcache_cache_only(map, false);
/* Resync */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
data->written[i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
regmap_exit(map);
}
static void cache_sync_patch(struct kunit *test) static void cache_sync_patch(struct kunit *test)
{ {
struct regcache_types *t = (struct regcache_types *)test->param_value; struct regcache_types *t = (struct regcache_types *)test->param_value;
...@@ -712,10 +833,333 @@ static void cache_drop(struct kunit *test) ...@@ -712,10 +833,333 @@ static void cache_drop(struct kunit *test)
regmap_exit(map); regmap_exit(map);
} }
struct raw_test_types {
const char *name;
enum regcache_type cache_type;
enum regmap_endian val_endian;
};
static void raw_to_desc(const struct raw_test_types *t, char *desc)
{
strcpy(desc, t->name);
}
static const struct raw_test_types raw_types_list[] = {
{ "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
{ "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};
KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
static const struct raw_test_types raw_cache_types_list[] = {
{ "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
{ "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
{ "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
{ "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
{ "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
{ "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
};
KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
static const struct regmap_config raw_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_format_endian = REGMAP_ENDIAN_LITTLE,
.reg_bits = 16,
.val_bits = 16,
};
static struct regmap *gen_raw_regmap(struct regmap_config *config,
struct raw_test_types *test_type,
struct regmap_ram_data **data)
{
u16 *buf;
struct regmap *ret;
size_t size = (config->max_register + 1) * config->reg_bits / 8;
int i;
struct reg_default *defaults;
config->cache_type = test_type->cache_type;
config->val_format_endian = test_type->val_endian;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
get_random_bytes(buf, size);
*data = kzalloc(sizeof(**data), GFP_KERNEL);
if (!(*data))
return ERR_PTR(-ENOMEM);
(*data)->vals = (void *)buf;
config->num_reg_defaults = config->max_register + 1;
defaults = kcalloc(config->num_reg_defaults,
sizeof(struct reg_default),
GFP_KERNEL);
if (!defaults)
return ERR_PTR(-ENOMEM);
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i;
switch (test_type->val_endian) {
case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]);
break;
case REGMAP_ENDIAN_BIG:
defaults[i].def = be16_to_cpu(buf[i]);
break;
default:
return ERR_PTR(-EINVAL);
}
}
/*
* We use the defaults in the tests but they don't make sense
* to the core if there's no cache.
*/
if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
ret = regmap_init_raw_ram(config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
}
return ret;
}
static void raw_read_defaults_single(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int rval;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Check that we can read the defaults via the API */
for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
regmap_exit(map);
}
static void raw_read_defaults(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 *rval;
u16 def;
size_t val_len;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
val_len = sizeof(*rval) * (config.max_register + 1);
rval = kmalloc(val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
return;
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def;
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
} else {
KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
}
}
kfree(rval);
regmap_exit(map);
}
static void raw_write_read_single(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 val;
unsigned int rval;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
get_random_bytes(&val, sizeof(val));
/* If we write a value to a register we can read it back */
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
regmap_exit(map);
}
static void raw_write(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 *hw_buf;
u16 val[2];
unsigned int rval;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val));
/* Do a raw write */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
switch (i) {
case 2:
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
be16_to_cpu(val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
le16_to_cpu(val[i % 2]));
}
break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
/* The values should appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
regmap_exit(map);
}
static void raw_sync(struct kunit *test)
{
struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
u16 val[2];
u16 *hw_buf;
unsigned int rval;
int i;
config = raw_regmap_config;
map = gen_raw_regmap(&config, t, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
hw_buf = (u16 *)data->vals;
get_random_bytes(&val, sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
if (config.val_format_endian == REGMAP_ENDIAN_BIG)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
be16_to_cpu(val[0])));
else
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
le16_to_cpu(val[0])));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
switch (i) {
case 2:
case 3:
case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
be16_to_cpu(val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
le16_to_cpu(val[i % 2]));
}
break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
/* Do the sync */
regcache_cache_only(map, false);
regcache_mark_dirty(map);
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
regmap_exit(map);
}
 static struct kunit_case regmap_test_cases[] = {
     KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
     KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
     KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
+    KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
+    KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
     KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
     KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
     KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
@@ -725,8 +1169,15 @@ static struct kunit_case regmap_test_cases[] = {
     KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
     KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
     KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+    KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
     KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
     KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+    KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
+    KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
+    KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
+    KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
+    KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
     {}
 };
......
@@ -448,7 +448,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
     if (min_stride < 0)
         return ERR_PTR(min_stride);
 
-    if (config->reg_stride < min_stride)
+    if (config->reg_stride && config->reg_stride < min_stride)
         return ERR_PTR(-EINVAL);
 
     if (config->use_relaxed_mmio && config->io_port)
......
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - Memory region with raw access
//
// This is intended for testing only
//
// Copyright (c) 2023, Arm Ltd
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include "internal.h"
static unsigned int decode_reg(enum regmap_endian endian, const void *reg)
{
const u16 *r = reg;
if (endian == REGMAP_ENDIAN_BIG)
return be16_to_cpu(*r);
else
return le16_to_cpu(*r);
}
static int regmap_raw_ram_gather_write(void *context,
const void *reg, size_t reg_len,
const void *val, size_t val_len)
{
struct regmap_ram_data *data = context;
unsigned int r;
u16 *our_buf = (u16 *)data->vals;
int i;
if (reg_len != 2)
return -EINVAL;
if (val_len % 2)
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
memcpy(&our_buf[r], val, val_len);
for (i = 0; i < val_len / 2; i++)
data->written[r + i] = true;
return 0;
}
static int regmap_raw_ram_write(void *context, const void *data, size_t count)
{
return regmap_raw_ram_gather_write(context, data, 2,
data + 2, count - 2);
}
static int regmap_raw_ram_read(void *context,
const void *reg, size_t reg_len,
void *val, size_t val_len)
{
struct regmap_ram_data *data = context;
unsigned int r;
u16 *our_buf = (u16 *)data->vals;
int i;
if (reg_len != 2)
return -EINVAL;
if (val_len % 2)
return -EINVAL;
r = decode_reg(data->reg_endian, reg);
memcpy(val, &our_buf[r], val_len);
for (i = 0; i < val_len / 2; i++)
data->read[r + i] = true;
return 0;
}
static void regmap_raw_ram_free_context(void *context)
{
struct regmap_ram_data *data = context;
kfree(data->vals);
kfree(data->read);
kfree(data->written);
kfree(data);
}
static const struct regmap_bus regmap_raw_ram = {
.fast_io = true,
.write = regmap_raw_ram_write,
.gather_write = regmap_raw_ram_gather_write,
.read = regmap_raw_ram_read,
.free_context = regmap_raw_ram_free_context,
};
struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
{
struct regmap *map;
if (config->reg_bits != 16)
return ERR_PTR(-EINVAL);
if (!config->max_register) {
pr_crit("No max_register specified for RAM regmap\n");
return ERR_PTR(-EINVAL);
}
data->read = kcalloc(sizeof(bool), config->max_register + 1,
GFP_KERNEL);
if (!data->read)
return ERR_PTR(-ENOMEM);
data->written = kcalloc(sizeof(bool), config->max_register + 1,
GFP_KERNEL);
if (!data->written)
return ERR_PTR(-ENOMEM);
data->reg_endian = config->reg_format_endian;
map = __regmap_init(NULL, &regmap_raw_ram, data, config,
lock_key, lock_name);
return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_raw_ram);
MODULE_LICENSE("GPL v2");
@@ -2983,6 +2983,11 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         size_t chunk_count, chunk_bytes;
         size_t chunk_regs = val_count;
 
+        if (!map->cache_bypass && map->cache_only) {
+            ret = -EBUSY;
+            goto out;
+        }
+
         if (!map->read) {
             ret = -ENOTSUPP;
             goto out;
@@ -3078,18 +3083,19 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg,
         goto out_unlock;
     }
 
+    /*
+     * We have not defined the FIFO semantics for cache, as the
+     * cache is just one value deep. Should we return the last
+     * written value? Just avoid this by always reading the FIFO
+     * even when using cache. Cache only will not work.
+     */
+    if (!map->cache_bypass && map->cache_only) {
+        ret = -EBUSY;
+        goto out_unlock;
+    }
+
     /* Use the accelerated operation if we can */
     if (map->bus->reg_noinc_read) {
-        /*
-         * We have not defined the FIFO semantics for cache, as the
-         * cache is just one value deep. Should we return the last
-         * written value? Just avoid this by always reading the FIFO
-         * even when using cache. Cache only will not work.
-         */
-        if (map->cache_only) {
-            ret = -EBUSY;
-            goto out_unlock;
-        }
-
         ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
         goto out_unlock;
     }
@@ -3273,7 +3279,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
         tmp = orig & ~mask;
         tmp |= val & mask;
 
-        if (force_write || (tmp != orig)) {
+        if (force_write || (tmp != orig) || map->force_write_field) {
             ret = _regmap_write(map, reg, tmp);
             if (ret == 0 && change)
                 *change = true;
......
...@@ -100,13 +100,23 @@ static const struct regmap_irq dio48e_regmap_irqs[] = { ...@@ -100,13 +100,23 @@ static const struct regmap_irq dio48e_regmap_irqs[] = {
DIO48E_REGMAP_IRQ(0), DIO48E_REGMAP_IRQ(1), DIO48E_REGMAP_IRQ(0), DIO48E_REGMAP_IRQ(1),
}; };
static int dio48e_handle_mask_sync(struct regmap *const map, const int index, /**
* struct dio48e_gpio - GPIO device private data structure
* @map: Regmap for the device
* @irq_mask: Current IRQ mask state on the device
*/
struct dio48e_gpio {
struct regmap *map;
unsigned int irq_mask;
};
static int dio48e_handle_mask_sync(const int index,
const unsigned int mask_buf_def, const unsigned int mask_buf_def,
const unsigned int mask_buf, const unsigned int mask_buf,
void *const irq_drv_data) void *const irq_drv_data)
{ {
unsigned int *const irq_mask = irq_drv_data; struct dio48e_gpio *const dio48egpio = irq_drv_data;
const unsigned int prev_mask = *irq_mask; const unsigned int prev_mask = dio48egpio->irq_mask;
int err; int err;
unsigned int val; unsigned int val;
...@@ -115,19 +125,19 @@ static int dio48e_handle_mask_sync(struct regmap *const map, const int index, ...@@ -115,19 +125,19 @@ static int dio48e_handle_mask_sync(struct regmap *const map, const int index,
return 0; return 0;
/* remember the current mask for the next mask sync */ /* remember the current mask for the next mask sync */
*irq_mask = mask_buf; dio48egpio->irq_mask = mask_buf;
/* if all previously masked, enable interrupts when unmasking */ /* if all previously masked, enable interrupts when unmasking */
if (prev_mask == mask_buf_def) { if (prev_mask == mask_buf_def) {
err = regmap_write(map, DIO48E_CLEAR_INTERRUPT, 0x00); err = regmap_write(dio48egpio->map, DIO48E_CLEAR_INTERRUPT, 0x00);
if (err) if (err)
return err; return err;
return regmap_write(map, DIO48E_ENABLE_INTERRUPT, 0x00); return regmap_write(dio48egpio->map, DIO48E_ENABLE_INTERRUPT, 0x00);
} }
/* if all are currently masked, disable interrupts */ /* if all are currently masked, disable interrupts */
if (mask_buf == mask_buf_def) if (mask_buf == mask_buf_def)
return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val); return regmap_read(dio48egpio->map, DIO48E_DISABLE_INTERRUPT, &val);
return 0; return 0;
} }
...@@ -168,7 +178,7 @@ static int dio48e_probe(struct device *dev, unsigned int id) ...@@ -168,7 +178,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
struct regmap *map; struct regmap *map;
int err; int err;
struct regmap_irq_chip *chip; struct regmap_irq_chip *chip;
unsigned int irq_mask; struct dio48e_gpio *dio48egpio;
struct regmap_irq_chip_data *chip_data; struct regmap_irq_chip_data *chip_data;
if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) { if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) {
...@@ -186,12 +196,14 @@ static int dio48e_probe(struct device *dev, unsigned int id) ...@@ -186,12 +196,14 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return dev_err_probe(dev, PTR_ERR(map), return dev_err_probe(dev, PTR_ERR(map),
"Unable to initialize register map\n"); "Unable to initialize register map\n");
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
if (!chip) if (!dio48egpio)
return -ENOMEM; return -ENOMEM;
chip->irq_drv_data = devm_kzalloc(dev, sizeof(irq_mask), GFP_KERNEL); dio48egpio->map = map;
if (!chip->irq_drv_data)
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM; return -ENOMEM;
chip->name = name; chip->name = name;
...@@ -202,6 +214,7 @@ static int dio48e_probe(struct device *dev, unsigned int id) ...@@ -202,6 +214,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
chip->irqs = dio48e_regmap_irqs; chip->irqs = dio48e_regmap_irqs;
chip->num_irqs = ARRAY_SIZE(dio48e_regmap_irqs); chip->num_irqs = ARRAY_SIZE(dio48e_regmap_irqs);
chip->handle_mask_sync = dio48e_handle_mask_sync; chip->handle_mask_sync = dio48e_handle_mask_sync;
chip->irq_drv_data = dio48egpio;
/* Initialize to prevent spurious interrupts before we're ready */ /* Initialize to prevent spurious interrupts before we're ready */
err = dio48e_irq_init_hw(map); err = dio48e_irq_init_hw(map);
......
...@@ -1528,9 +1528,6 @@ struct regmap_irq_chip_data; ...@@ -1528,9 +1528,6 @@ struct regmap_irq_chip_data;
* status_base. Should contain num_regs arrays. * status_base. Should contain num_regs arrays.
* Can be provided for chips with more complex mapping than * Can be provided for chips with more complex mapping than
* 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ... * 1.st bit to 1.st sub-reg, 2.nd bit to 2.nd sub-reg, ...
* When used with not_fixed_stride, each one-element array
* member contains offset calculated as address from each
* peripheral to first peripheral.
* @num_main_regs: Number of 'main status' irq registers for chips which have * @num_main_regs: Number of 'main status' irq registers for chips which have
* main_status set. * main_status set.
* *
...@@ -1542,10 +1539,6 @@ struct regmap_irq_chip_data; ...@@ -1542,10 +1539,6 @@ struct regmap_irq_chip_data;
* @ack_base: Base ack address. If zero then the chip is clear on read. * @ack_base: Base ack address. If zero then the chip is clear on read.
* Using zero value is possible with @use_ack bit. * Using zero value is possible with @use_ack bit.
* @wake_base: Base address for wake enables. If zero unsupported. * @wake_base: Base address for wake enables. If zero unsupported.
* @type_base: Base address for irq type. If zero unsupported. Deprecated,
* use @config_base instead.
* @virt_reg_base: Base addresses for extra config regs. Deprecated, use
* @config_base instead.
* @config_base: Base address for IRQ type config regs. If null unsupported. * @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous. * @irq_reg_stride: Stride to use for chips where registers are not contiguous.
* @init_ack_masked: Ack all masked interrupts once during initalization. * @init_ack_masked: Ack all masked interrupts once during initalization.
...@@ -1571,11 +1564,6 @@ struct regmap_irq_chip_data; ...@@ -1571,11 +1564,6 @@ struct regmap_irq_chip_data;
* registers before unmasking interrupts to clear any bits * registers before unmasking interrupts to clear any bits
* set when they were masked. * set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it. * @runtime_pm: Hold a runtime PM lock on the device when accessing it.
* @not_fixed_stride: Used when chip peripherals are not laid out with fixed
* stride. Must be used with sub_reg_offsets containing the
* offsets to each peripheral. Deprecated; the same thing
* can be accomplished with a @get_irq_reg callback, without
* the need for a @sub_reg_offsets table.
* @no_status: No status register: all interrupts assumed generated by device. * @no_status: No status register: all interrupts assumed generated by device.
* *
* @num_regs: Number of registers in each control bank. * @num_regs: Number of registers in each control bank.
...@@ -1583,12 +1571,6 @@ struct regmap_irq_chip_data; ...@@ -1583,12 +1571,6 @@ struct regmap_irq_chip_data;
* @irqs: Descriptors for individual IRQs. Interrupt numbers are * @irqs: Descriptors for individual IRQs. Interrupt numbers are
* assigned based on the index in the array of the interrupt. * assigned based on the index in the array of the interrupt.
* @num_irqs: Number of descriptors. * @num_irqs: Number of descriptors.
*
* @num_type_reg: Number of type registers. Deprecated, use config registers
* instead.
* @num_virt_regs: Number of non-standard irq configuration registers.
* If zero unsupported. Deprecated, use config registers
* instead.
* @num_config_bases: Number of config base registers. * @num_config_bases: Number of config base registers.
* @num_config_regs: Number of config registers for each config base register. * @num_config_regs: Number of config registers for each config base register.
* *
...@@ -1598,15 +1580,12 @@ struct regmap_irq_chip_data; ...@@ -1598,15 +1580,12 @@ struct regmap_irq_chip_data;
* after handling the interrupts in regmap_irq_handler(). * after handling the interrupts in regmap_irq_handler().
* @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be * @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be
* in the range [0, num_regs) * in the range [0, num_regs)
* @set_type_virt: Driver specific callback to extend regmap_irq_set_type()
* and configure virt regs. Deprecated, use @set_type_config
* callback and config registers instead.
* @set_type_config: Callback used for configuring irq types. * @set_type_config: Callback used for configuring irq types.
* @get_irq_reg: Callback for mapping (base register, index) pairs to register * @get_irq_reg: Callback for mapping (base register, index) pairs to register
* addresses. The base register will be one of @status_base, * addresses. The base register will be one of @status_base,
* @mask_base, etc., @main_status, or any of @config_base. * @mask_base, etc., @main_status, or any of @config_base.
* The index will be in the range [0, num_main_regs[ for the * The index will be in the range [0, num_main_regs[ for the
* main status base, [0, num_type_settings[ for any config * main status base, [0, num_config_regs[ for any config
* register base, and [0, num_regs[ for any other base. * register base, and [0, num_regs[ for any other base.
* If unspecified then regmap_irq_get_irq_reg_linear() is used. * If unspecified then regmap_irq_get_irq_reg_linear() is used.
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when * @irq_drv_data: Driver specific IRQ data which is passed as parameter when
...@@ -1629,8 +1608,6 @@ struct regmap_irq_chip { ...@@ -1629,8 +1608,6 @@ struct regmap_irq_chip {
unsigned int unmask_base; unsigned int unmask_base;
unsigned int ack_base; unsigned int ack_base;
unsigned int wake_base; unsigned int wake_base;
unsigned int type_base;
unsigned int *virt_reg_base;
const unsigned int *config_base; const unsigned int *config_base;
unsigned int irq_reg_stride; unsigned int irq_reg_stride;
unsigned int init_ack_masked:1; unsigned int init_ack_masked:1;
...@@ -1643,7 +1620,6 @@ struct regmap_irq_chip { ...@@ -1643,7 +1620,6 @@ struct regmap_irq_chip {
unsigned int type_in_mask:1; unsigned int type_in_mask:1;
unsigned int clear_on_unmask:1; unsigned int clear_on_unmask:1;
unsigned int runtime_pm:1; unsigned int runtime_pm:1;
unsigned int not_fixed_stride:1;
unsigned int no_status:1; unsigned int no_status:1;
int num_regs; int num_regs;
@@ -1651,18 +1627,13 @@ struct regmap_irq_chip {
     const struct regmap_irq *irqs;
     int num_irqs;
 
-    int num_type_reg;
-    int num_virt_regs;
     int num_config_bases;
     int num_config_regs;
 
     int (*handle_pre_irq)(void *irq_drv_data);
     int (*handle_post_irq)(void *irq_drv_data);
-    int (*handle_mask_sync)(struct regmap *map, int index,
-                unsigned int mask_buf_def,
+    int (*handle_mask_sync)(int index, unsigned int mask_buf_def,
                 unsigned int mask_buf, void *irq_drv_data);
-    int (*set_type_virt)(unsigned int **buf, unsigned int type,
-                 unsigned long hwirq, int reg);
     int (*set_type_config)(unsigned int **buf, unsigned int type,
                    const struct regmap_irq *irq_data, int idx,
                    void *irq_drv_data);
......
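
For completeness, a sketch of how a test might create one of the raw,
RAM-backed regmaps added above; the config values are illustrative and the
caller-allocated value buffer mirrors what gen_raw_regmap() does in
regmap-kunit.c:

  #include <linux/err.h>
  #include <linux/regmap.h>
  #include <linux/slab.h>

  #include "internal.h"	/* struct regmap_ram_data, regmap_init_raw_ram() */

  /* Illustrative only: a 16-bit, little-endian raw RAM-backed map */
  static struct regmap *example_raw_ram_map(struct regmap_ram_data *data)
  {
      static const struct regmap_config config = {
          .reg_bits = 16,	/* the raw RAM bus only handles 16-bit registers */
          .val_bits = 16,
          .max_register = 0x20,
          .reg_format_endian = REGMAP_ENDIAN_LITTLE,
          .val_format_endian = REGMAP_ENDIAN_LITTLE,
          .cache_type = REGCACHE_NONE,
      };
      u16 *buf;

      /* data->vals must be allocated by the caller, as internal.h notes */
      buf = kcalloc(config.max_register + 1, sizeof(*buf), GFP_KERNEL);
      if (!buf)
          return ERR_PTR(-ENOMEM);
      data->vals = (void *)buf;

      return regmap_init_raw_ram(&config, data);
  }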