Commit 898673b9 authored by Simon Trimmer, committed by Mark Brown

ASoC: cs35l56: Move shared data into a common data structure

The ASoC and HDA drivers have structures that contain some of the same
information. Instead of maintaining two copies of this data, the drivers
should share a common data structure; this will also enable common
utility functions to be created.

The first step is to move these members into the shared structure in the
ASoC driver, as sketched below.
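
A rough sketch of the resulting layout (illustrative only, derived from the
hunks below; the ASoC-specific members themselves are not changed by this
patch):

	struct cs35l56_private {
		struct cs35l56_base base;	/* data shared with the HDA driver */
		/* ... ASoC-specific members ... */
	};

	/* Shared helpers take the common struct, for example: */
	int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
	int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq);

Call sites in the ASoC driver then use cs35l56->base.dev and
cs35l56->base.regmap, and pass &cs35l56->base into the shared functions.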
Signed-off-by: Simon Trimmer <simont@opensource.cirrus.com>
Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
Acked-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20230721132120.5523-2-rf@opensource.cirrus.com
Signed-off-by: Mark Brown <broonie@kernel.org>
parent de1b43a5
...@@ -252,6 +252,19 @@ ...@@ -252,6 +252,19 @@
#define CS35L56_NUM_BULK_SUPPLIES 3 #define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5 #define CS35L56_NUM_DSP_REGIONS 5
struct cs35l56_base {
struct device *dev;
struct regmap *regmap;
int irq;
struct mutex irq_lock;
u8 rev;
bool init_done;
bool fw_patched;
bool secured;
bool can_hibernate;
struct gpio_desc *reset_gpio;
};
extern struct regmap_config cs35l56_regmap_i2c; extern struct regmap_config cs35l56_regmap_i2c;
extern struct regmap_config cs35l56_regmap_spi; extern struct regmap_config cs35l56_regmap_spi;
extern struct regmap_config cs35l56_regmap_sdw; extern struct regmap_config cs35l56_regmap_sdw;
...@@ -260,7 +273,7 @@ extern const struct cs_dsp_region cs35l56_dsp1_regions[CS35L56_NUM_DSP_REGIONS]; ...@@ -260,7 +273,7 @@ extern const struct cs_dsp_region cs35l56_dsp1_regions[CS35L56_NUM_DSP_REGIONS];
extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC]; extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC]; extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
int cs35l56_set_patch(struct regmap *regmap); int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq); int cs35l56_get_bclk_freq_id(unsigned int freq);
void cs35l56_fill_supply_names(struct regulator_bulk_data *data); void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
......
...@@ -26,14 +26,14 @@ static int cs35l56_i2c_probe(struct i2c_client *client) ...@@ -26,14 +26,14 @@ static int cs35l56_i2c_probe(struct i2c_client *client)
if (!cs35l56) if (!cs35l56)
return -ENOMEM; return -ENOMEM;
cs35l56->dev = dev; cs35l56->base.dev = dev;
cs35l56->can_hibernate = true; cs35l56->base.can_hibernate = true;
i2c_set_clientdata(client, cs35l56); i2c_set_clientdata(client, cs35l56);
cs35l56->regmap = devm_regmap_init_i2c(client, regmap_config); cs35l56->base.regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(cs35l56->regmap)) { if (IS_ERR(cs35l56->base.regmap)) {
ret = PTR_ERR(cs35l56->regmap); ret = PTR_ERR(cs35l56->base.regmap);
return dev_err_probe(cs35l56->dev, ret, "Failed to allocate register map\n"); return dev_err_probe(cs35l56->base.dev, ret, "Failed to allocate register map\n");
} }
ret = cs35l56_common_probe(cs35l56); ret = cs35l56_common_probe(cs35l56);
...@@ -42,7 +42,7 @@ static int cs35l56_i2c_probe(struct i2c_client *client) ...@@ -42,7 +42,7 @@ static int cs35l56_i2c_probe(struct i2c_client *client)
ret = cs35l56_init(cs35l56); ret = cs35l56_init(cs35l56);
if (ret == 0) if (ret == 0)
ret = cs35l56_irq_request(cs35l56, client->irq); ret = cs35l56_irq_request(&cs35l56->base, client->irq);
if (ret < 0) if (ret < 0)
cs35l56_remove(cs35l56); cs35l56_remove(cs35l56);
......
...@@ -166,13 +166,13 @@ static void cs35l56_sdw_init(struct sdw_slave *peripheral) ...@@ -166,13 +166,13 @@ static void cs35l56_sdw_init(struct sdw_slave *peripheral)
struct cs35l56_private *cs35l56 = dev_get_drvdata(&peripheral->dev); struct cs35l56_private *cs35l56 = dev_get_drvdata(&peripheral->dev);
int ret; int ret;
pm_runtime_get_noresume(cs35l56->dev); pm_runtime_get_noresume(cs35l56->base.dev);
regcache_cache_only(cs35l56->regmap, false); regcache_cache_only(cs35l56->base.regmap, false);
ret = cs35l56_init(cs35l56); ret = cs35l56_init(cs35l56);
if (ret < 0) { if (ret < 0) {
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
goto out; goto out;
} }
...@@ -180,15 +180,15 @@ static void cs35l56_sdw_init(struct sdw_slave *peripheral) ...@@ -180,15 +180,15 @@ static void cs35l56_sdw_init(struct sdw_slave *peripheral)
* cs35l56_init can return with !init_done if it triggered * cs35l56_init can return with !init_done if it triggered
* a soft reset. * a soft reset.
*/ */
if (cs35l56->init_done) { if (cs35l56->base.init_done) {
/* Enable SoundWire interrupts */ /* Enable SoundWire interrupts */
sdw_write_no_pm(peripheral, CS35L56_SDW_GEN_INT_MASK_1, sdw_write_no_pm(peripheral, CS35L56_SDW_GEN_INT_MASK_1,
CS35L56_SDW_INT_MASK_CODEC_IRQ); CS35L56_SDW_INT_MASK_CODEC_IRQ);
} }
out: out:
pm_runtime_mark_last_busy(cs35l56->dev); pm_runtime_mark_last_busy(cs35l56->base.dev);
pm_runtime_put_autosuspend(cs35l56->dev); pm_runtime_put_autosuspend(cs35l56->base.dev);
} }
static int cs35l56_sdw_interrupt(struct sdw_slave *peripheral, static int cs35l56_sdw_interrupt(struct sdw_slave *peripheral,
...@@ -198,7 +198,7 @@ static int cs35l56_sdw_interrupt(struct sdw_slave *peripheral, ...@@ -198,7 +198,7 @@ static int cs35l56_sdw_interrupt(struct sdw_slave *peripheral,
/* SoundWire core holds our pm_runtime when calling this function. */ /* SoundWire core holds our pm_runtime when calling this function. */
dev_dbg(cs35l56->dev, "int control_port=%#x\n", status->control_port); dev_dbg(cs35l56->base.dev, "int control_port=%#x\n", status->control_port);
if ((status->control_port & SDW_SCP_INT1_IMPL_DEF) == 0) if ((status->control_port & SDW_SCP_INT1_IMPL_DEF) == 0)
return 0; return 0;
...@@ -207,7 +207,7 @@ static int cs35l56_sdw_interrupt(struct sdw_slave *peripheral, ...@@ -207,7 +207,7 @@ static int cs35l56_sdw_interrupt(struct sdw_slave *peripheral,
* Prevent bus manager suspending and possibly issuing a * Prevent bus manager suspending and possibly issuing a
* bus-reset before the queued work has run. * bus-reset before the queued work has run.
*/ */
pm_runtime_get_noresume(cs35l56->dev); pm_runtime_get_noresume(cs35l56->base.dev);
/* /*
* Mask and clear until it has been handled. The read of GEN_INT_STAT_1 * Mask and clear until it has been handled. The read of GEN_INT_STAT_1
...@@ -230,14 +230,14 @@ static void cs35l56_sdw_irq_work(struct work_struct *work) ...@@ -230,14 +230,14 @@ static void cs35l56_sdw_irq_work(struct work_struct *work)
struct cs35l56_private, struct cs35l56_private,
sdw_irq_work); sdw_irq_work);
cs35l56_irq(-1, cs35l56); cs35l56_irq(-1, &cs35l56->base);
/* unmask interrupts */ /* unmask interrupts */
if (!cs35l56->sdw_irq_no_unmask) if (!cs35l56->sdw_irq_no_unmask)
sdw_write_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_MASK_1, sdw_write_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_MASK_1,
CS35L56_SDW_INT_MASK_CODEC_IRQ); CS35L56_SDW_INT_MASK_CODEC_IRQ);
pm_runtime_put_autosuspend(cs35l56->dev); pm_runtime_put_autosuspend(cs35l56->base.dev);
} }
static int cs35l56_sdw_read_prop(struct sdw_slave *peripheral) static int cs35l56_sdw_read_prop(struct sdw_slave *peripheral)
...@@ -246,7 +246,7 @@ static int cs35l56_sdw_read_prop(struct sdw_slave *peripheral) ...@@ -246,7 +246,7 @@ static int cs35l56_sdw_read_prop(struct sdw_slave *peripheral)
struct sdw_slave_prop *prop = &peripheral->prop; struct sdw_slave_prop *prop = &peripheral->prop;
struct sdw_dpn_prop *ports; struct sdw_dpn_prop *ports;
ports = devm_kcalloc(cs35l56->dev, 2, sizeof(*ports), GFP_KERNEL); ports = devm_kcalloc(cs35l56->base.dev, 2, sizeof(*ports), GFP_KERNEL);
if (!ports) if (!ports)
return -ENOMEM; return -ENOMEM;
...@@ -279,17 +279,17 @@ static int cs35l56_sdw_update_status(struct sdw_slave *peripheral, ...@@ -279,17 +279,17 @@ static int cs35l56_sdw_update_status(struct sdw_slave *peripheral,
switch (status) { switch (status) {
case SDW_SLAVE_ATTACHED: case SDW_SLAVE_ATTACHED:
dev_dbg(cs35l56->dev, "%s: ATTACHED\n", __func__); dev_dbg(cs35l56->base.dev, "%s: ATTACHED\n", __func__);
if (cs35l56->sdw_attached) if (cs35l56->sdw_attached)
break; break;
if (!cs35l56->init_done || cs35l56->soft_resetting) if (!cs35l56->base.init_done || cs35l56->soft_resetting)
cs35l56_sdw_init(peripheral); cs35l56_sdw_init(peripheral);
cs35l56->sdw_attached = true; cs35l56->sdw_attached = true;
break; break;
case SDW_SLAVE_UNATTACHED: case SDW_SLAVE_UNATTACHED:
dev_dbg(cs35l56->dev, "%s: UNATTACHED\n", __func__); dev_dbg(cs35l56->base.dev, "%s: UNATTACHED\n", __func__);
cs35l56->sdw_attached = false; cs35l56->sdw_attached = false;
break; break;
default: default:
...@@ -305,7 +305,7 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56, ...@@ -305,7 +305,7 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56,
unsigned int curr_scale_reg, next_scale_reg; unsigned int curr_scale_reg, next_scale_reg;
int curr_scale, next_scale, ret; int curr_scale, next_scale, ret;
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return 0; return 0;
if (peripheral->bus->params.curr_bank) { if (peripheral->bus->params.curr_bank) {
...@@ -324,13 +324,13 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56, ...@@ -324,13 +324,13 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56,
*/ */
curr_scale = sdw_read_no_pm(peripheral, curr_scale_reg); curr_scale = sdw_read_no_pm(peripheral, curr_scale_reg);
if (curr_scale < 0) { if (curr_scale < 0) {
dev_err(cs35l56->dev, "Failed to read current clock scale: %d\n", curr_scale); dev_err(cs35l56->base.dev, "Failed to read current clock scale: %d\n", curr_scale);
return curr_scale; return curr_scale;
} }
next_scale = sdw_read_no_pm(peripheral, next_scale_reg); next_scale = sdw_read_no_pm(peripheral, next_scale_reg);
if (next_scale < 0) { if (next_scale < 0) {
dev_err(cs35l56->dev, "Failed to read next clock scale: %d\n", next_scale); dev_err(cs35l56->base.dev, "Failed to read next clock scale: %d\n", next_scale);
return next_scale; return next_scale;
} }
...@@ -338,7 +338,8 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56, ...@@ -338,7 +338,8 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56,
next_scale = cs35l56->old_sdw_clock_scale; next_scale = cs35l56->old_sdw_clock_scale;
ret = sdw_write_no_pm(peripheral, next_scale_reg, next_scale); ret = sdw_write_no_pm(peripheral, next_scale_reg, next_scale);
if (ret < 0) { if (ret < 0) {
dev_err(cs35l56->dev, "Failed to modify current clock scale: %d\n", ret); dev_err(cs35l56->base.dev, "Failed to modify current clock scale: %d\n",
ret);
return ret; return ret;
} }
} }
...@@ -346,11 +347,11 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56, ...@@ -346,11 +347,11 @@ static int cs35l56_a1_kick_divider(struct cs35l56_private *cs35l56,
cs35l56->old_sdw_clock_scale = curr_scale; cs35l56->old_sdw_clock_scale = curr_scale;
ret = sdw_write_no_pm(peripheral, curr_scale_reg, CS35L56_SDW_INVALID_BUS_SCALE); ret = sdw_write_no_pm(peripheral, curr_scale_reg, CS35L56_SDW_INVALID_BUS_SCALE);
if (ret < 0) { if (ret < 0) {
dev_err(cs35l56->dev, "Failed to modify current clock scale: %d\n", ret); dev_err(cs35l56->base.dev, "Failed to modify current clock scale: %d\n", ret);
return ret; return ret;
} }
dev_dbg(cs35l56->dev, "Next bus scale: %#x\n", next_scale); dev_dbg(cs35l56->base.dev, "Next bus scale: %#x\n", next_scale);
return 0; return 0;
} }
...@@ -362,9 +363,10 @@ static int cs35l56_sdw_bus_config(struct sdw_slave *peripheral, ...@@ -362,9 +363,10 @@ static int cs35l56_sdw_bus_config(struct sdw_slave *peripheral,
int sclk; int sclk;
sclk = params->curr_dr_freq / 2; sclk = params->curr_dr_freq / 2;
dev_dbg(cs35l56->dev, "%s: sclk=%u c=%u r=%u\n", __func__, sclk, params->col, params->row); dev_dbg(cs35l56->base.dev, "%s: sclk=%u c=%u r=%u\n",
__func__, sclk, params->col, params->row);
if (cs35l56->rev < 0xb0) if (cs35l56->base.rev < 0xb0)
return cs35l56_a1_kick_divider(cs35l56, peripheral); return cs35l56_a1_kick_divider(cs35l56, peripheral);
return 0; return 0;
...@@ -376,7 +378,7 @@ static int __maybe_unused cs35l56_sdw_clk_stop(struct sdw_slave *peripheral, ...@@ -376,7 +378,7 @@ static int __maybe_unused cs35l56_sdw_clk_stop(struct sdw_slave *peripheral,
{ {
struct cs35l56_private *cs35l56 = dev_get_drvdata(&peripheral->dev); struct cs35l56_private *cs35l56 = dev_get_drvdata(&peripheral->dev);
dev_dbg(cs35l56->dev, "%s: mode:%d type:%d\n", __func__, mode, type); dev_dbg(cs35l56->base.dev, "%s: mode:%d type:%d\n", __func__, mode, type);
return 0; return 0;
} }
...@@ -397,10 +399,10 @@ static int __maybe_unused cs35l56_sdw_handle_unattach(struct cs35l56_private *cs ...@@ -397,10 +399,10 @@ static int __maybe_unused cs35l56_sdw_handle_unattach(struct cs35l56_private *cs
if (peripheral->unattach_request) { if (peripheral->unattach_request) {
/* Cannot access registers until bus is re-initialized. */ /* Cannot access registers until bus is re-initialized. */
dev_dbg(cs35l56->dev, "Wait for initialization_complete\n"); dev_dbg(cs35l56->base.dev, "Wait for initialization_complete\n");
if (!wait_for_completion_timeout(&peripheral->initialization_complete, if (!wait_for_completion_timeout(&peripheral->initialization_complete,
msecs_to_jiffies(5000))) { msecs_to_jiffies(5000))) {
dev_err(cs35l56->dev, "initialization_complete timed out\n"); dev_err(cs35l56->base.dev, "initialization_complete timed out\n");
return -ETIMEDOUT; return -ETIMEDOUT;
} }
...@@ -419,7 +421,7 @@ static int __maybe_unused cs35l56_sdw_runtime_suspend(struct device *dev) ...@@ -419,7 +421,7 @@ static int __maybe_unused cs35l56_sdw_runtime_suspend(struct device *dev)
{ {
struct cs35l56_private *cs35l56 = dev_get_drvdata(dev); struct cs35l56_private *cs35l56 = dev_get_drvdata(dev);
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return 0; return 0;
return cs35l56_runtime_suspend(dev); return cs35l56_runtime_suspend(dev);
...@@ -432,7 +434,7 @@ static int __maybe_unused cs35l56_sdw_runtime_resume(struct device *dev) ...@@ -432,7 +434,7 @@ static int __maybe_unused cs35l56_sdw_runtime_resume(struct device *dev)
dev_dbg(dev, "Runtime resume\n"); dev_dbg(dev, "Runtime resume\n");
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return 0; return 0;
ret = cs35l56_sdw_handle_unattach(cs35l56); ret = cs35l56_sdw_handle_unattach(cs35l56);
...@@ -454,7 +456,7 @@ static int __maybe_unused cs35l56_sdw_system_suspend(struct device *dev) ...@@ -454,7 +456,7 @@ static int __maybe_unused cs35l56_sdw_system_suspend(struct device *dev)
{ {
struct cs35l56_private *cs35l56 = dev_get_drvdata(dev); struct cs35l56_private *cs35l56 = dev_get_drvdata(dev);
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return 0; return 0;
/* /*
...@@ -493,21 +495,21 @@ static int cs35l56_sdw_probe(struct sdw_slave *peripheral, const struct sdw_devi ...@@ -493,21 +495,21 @@ static int cs35l56_sdw_probe(struct sdw_slave *peripheral, const struct sdw_devi
if (!cs35l56) if (!cs35l56)
return -ENOMEM; return -ENOMEM;
cs35l56->dev = dev; cs35l56->base.dev = dev;
cs35l56->sdw_peripheral = peripheral; cs35l56->sdw_peripheral = peripheral;
INIT_WORK(&cs35l56->sdw_irq_work, cs35l56_sdw_irq_work); INIT_WORK(&cs35l56->sdw_irq_work, cs35l56_sdw_irq_work);
dev_set_drvdata(dev, cs35l56); dev_set_drvdata(dev, cs35l56);
cs35l56->regmap = devm_regmap_init(dev, &cs35l56_regmap_bus_sdw, cs35l56->base.regmap = devm_regmap_init(dev, &cs35l56_regmap_bus_sdw,
peripheral, &cs35l56_regmap_sdw); peripheral, &cs35l56_regmap_sdw);
if (IS_ERR(cs35l56->regmap)) { if (IS_ERR(cs35l56->base.regmap)) {
ret = PTR_ERR(cs35l56->regmap); ret = PTR_ERR(cs35l56->base.regmap);
return dev_err_probe(dev, ret, "Failed to allocate register map\n"); return dev_err_probe(dev, ret, "Failed to allocate register map\n");
} }
/* Start in cache-only until device is enumerated */ /* Start in cache-only until device is enumerated */
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
ret = cs35l56_common_probe(cs35l56); ret = cs35l56_common_probe(cs35l56);
if (ret != 0) if (ret != 0)
......
...@@ -18,9 +18,10 @@ static const struct reg_sequence cs35l56_patch[] = { ...@@ -18,9 +18,10 @@ static const struct reg_sequence cs35l56_patch[] = {
{ CS35L56_MAIN_POSTURE_NUMBER, 0x00000000 }, { CS35L56_MAIN_POSTURE_NUMBER, 0x00000000 },
}; };
int cs35l56_set_patch(struct regmap *regmap) int cs35l56_set_patch(struct cs35l56_base *cs35l56_base)
{ {
return regmap_register_patch(regmap, cs35l56_patch, ARRAY_SIZE(cs35l56_patch)); return regmap_register_patch(cs35l56_base->regmap, cs35l56_patch,
ARRAY_SIZE(cs35l56_patch));
} }
EXPORT_SYMBOL_NS_GPL(cs35l56_set_patch, SND_SOC_CS35L56_SHARED); EXPORT_SYMBOL_NS_GPL(cs35l56_set_patch, SND_SOC_CS35L56_SHARED);
......
...@@ -25,13 +25,13 @@ static int cs35l56_spi_probe(struct spi_device *spi) ...@@ -25,13 +25,13 @@ static int cs35l56_spi_probe(struct spi_device *spi)
return -ENOMEM; return -ENOMEM;
spi_set_drvdata(spi, cs35l56); spi_set_drvdata(spi, cs35l56);
cs35l56->regmap = devm_regmap_init_spi(spi, regmap_config); cs35l56->base.regmap = devm_regmap_init_spi(spi, regmap_config);
if (IS_ERR(cs35l56->regmap)) { if (IS_ERR(cs35l56->base.regmap)) {
ret = PTR_ERR(cs35l56->regmap); ret = PTR_ERR(cs35l56->base.regmap);
return dev_err_probe(&spi->dev, ret, "Failed to allocate register map\n"); return dev_err_probe(&spi->dev, ret, "Failed to allocate register map\n");
} }
cs35l56->dev = &spi->dev; cs35l56->base.dev = &spi->dev;
ret = cs35l56_common_probe(cs35l56); ret = cs35l56_common_probe(cs35l56);
if (ret != 0) if (ret != 0)
...@@ -39,7 +39,7 @@ static int cs35l56_spi_probe(struct spi_device *spi) ...@@ -39,7 +39,7 @@ static int cs35l56_spi_probe(struct spi_device *spi)
ret = cs35l56_init(cs35l56); ret = cs35l56_init(cs35l56);
if (ret == 0) if (ret == 0)
ret = cs35l56_irq_request(cs35l56, spi->irq); ret = cs35l56_irq_request(&cs35l56->base, spi->irq);
if (ret < 0) if (ret < 0)
cs35l56_remove(cs35l56); cs35l56_remove(cs35l56);
......
...@@ -34,17 +34,17 @@ ...@@ -34,17 +34,17 @@
static int cs35l56_dsp_event(struct snd_soc_dapm_widget *w, static int cs35l56_dsp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event); struct snd_kcontrol *kcontrol, int event);
static int cs35l56_mbox_send(struct cs35l56_private *cs35l56, unsigned int command) static int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command)
{ {
unsigned int val; unsigned int val;
int ret; int ret;
regmap_write(cs35l56->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, command); regmap_write(cs35l56_base->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, command);
ret = regmap_read_poll_timeout(cs35l56->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, ret = regmap_read_poll_timeout(cs35l56_base->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1,
val, (val == 0), val, (val == 0),
CS35L56_MBOX_POLL_US, CS35L56_MBOX_TIMEOUT_US); CS35L56_MBOX_POLL_US, CS35L56_MBOX_TIMEOUT_US);
if (ret) { if (ret) {
dev_warn(cs35l56->dev, "MBOX command %#x failed: %d\n", command, ret); dev_warn(cs35l56_base->dev, "MBOX command %#x failed: %d\n", command, ret);
return ret; return ret;
} }
...@@ -174,25 +174,25 @@ static int cs35l56_play_event(struct snd_soc_dapm_widget *w, ...@@ -174,25 +174,25 @@ static int cs35l56_play_event(struct snd_soc_dapm_widget *w,
unsigned int val; unsigned int val;
int ret; int ret;
dev_dbg(cs35l56->dev, "play: %d\n", event); dev_dbg(cs35l56->base.dev, "play: %d\n", event);
switch (event) { switch (event) {
case SND_SOC_DAPM_PRE_PMU: case SND_SOC_DAPM_PRE_PMU:
/* Don't wait for ACK, we check in POST_PMU that it completed */ /* Don't wait for ACK, we check in POST_PMU that it completed */
return regmap_write(cs35l56->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, return regmap_write(cs35l56->base.regmap, CS35L56_DSP_VIRTUAL1_MBOX_1,
CS35L56_MBOX_CMD_AUDIO_PLAY); CS35L56_MBOX_CMD_AUDIO_PLAY);
case SND_SOC_DAPM_POST_PMU: case SND_SOC_DAPM_POST_PMU:
/* Wait for firmware to enter PS0 power state */ /* Wait for firmware to enter PS0 power state */
ret = regmap_read_poll_timeout(cs35l56->regmap, ret = regmap_read_poll_timeout(cs35l56->base.regmap,
CS35L56_TRANSDUCER_ACTUAL_PS, CS35L56_TRANSDUCER_ACTUAL_PS,
val, (val == CS35L56_PS0), val, (val == CS35L56_PS0),
CS35L56_PS0_POLL_US, CS35L56_PS0_POLL_US,
CS35L56_PS0_TIMEOUT_US); CS35L56_PS0_TIMEOUT_US);
if (ret) if (ret)
dev_err(cs35l56->dev, "PS0 wait failed: %d\n", ret); dev_err(cs35l56->base.dev, "PS0 wait failed: %d\n", ret);
return ret; return ret;
case SND_SOC_DAPM_POST_PMD: case SND_SOC_DAPM_POST_PMD:
return cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_AUDIO_PAUSE); return cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_AUDIO_PAUSE);
default: default:
return 0; return 0;
} }
...@@ -310,14 +310,14 @@ static int cs35l56_dsp_event(struct snd_soc_dapm_widget *w, ...@@ -310,14 +310,14 @@ static int cs35l56_dsp_event(struct snd_soc_dapm_widget *w,
struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component); struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(component);
dev_dbg(cs35l56->dev, "%s: %d\n", __func__, event); dev_dbg(cs35l56->base.dev, "%s: %d\n", __func__, event);
return wm_adsp_event(w, kcontrol, event); return wm_adsp_event(w, kcontrol, event);
} }
irqreturn_t cs35l56_irq(int irq, void *data) irqreturn_t cs35l56_irq(int irq, void *data)
{ {
struct cs35l56_private *cs35l56 = data; struct cs35l56_base *cs35l56_base = data;
unsigned int status1 = 0, status8 = 0, status20 = 0; unsigned int status1 = 0, status8 = 0, status20 = 0;
unsigned int mask1, mask8, mask20; unsigned int mask1, mask8, mask20;
unsigned int val; unsigned int val;
...@@ -325,77 +325,77 @@ irqreturn_t cs35l56_irq(int irq, void *data) ...@@ -325,77 +325,77 @@ irqreturn_t cs35l56_irq(int irq, void *data)
irqreturn_t ret = IRQ_NONE; irqreturn_t ret = IRQ_NONE;
if (!cs35l56->init_done) if (!cs35l56_base->init_done)
return IRQ_NONE; return IRQ_NONE;
mutex_lock(&cs35l56->irq_lock); mutex_lock(&cs35l56_base->irq_lock);
rv = pm_runtime_resume_and_get(cs35l56->dev); rv = pm_runtime_resume_and_get(cs35l56_base->dev);
if (rv < 0) { if (rv < 0) {
dev_err(cs35l56->dev, "irq: failed to get pm_runtime: %d\n", rv); dev_err(cs35l56_base->dev, "irq: failed to get pm_runtime: %d\n", rv);
goto err_unlock; goto err_unlock;
} }
regmap_read(cs35l56->regmap, CS35L56_IRQ1_STATUS, &val); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_STATUS, &val);
if ((val & CS35L56_IRQ1_STS_MASK) == 0) { if ((val & CS35L56_IRQ1_STS_MASK) == 0) {
dev_dbg(cs35l56->dev, "Spurious IRQ: no pending interrupt\n"); dev_dbg(cs35l56_base->dev, "Spurious IRQ: no pending interrupt\n");
goto err; goto err;
} }
/* Ack interrupts */ /* Ack interrupts */
regmap_read(cs35l56->regmap, CS35L56_IRQ1_EINT_1, &status1); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_EINT_1, &status1);
regmap_read(cs35l56->regmap, CS35L56_IRQ1_MASK_1, &mask1); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_MASK_1, &mask1);
status1 &= ~mask1; status1 &= ~mask1;
regmap_write(cs35l56->regmap, CS35L56_IRQ1_EINT_1, status1); regmap_write(cs35l56_base->regmap, CS35L56_IRQ1_EINT_1, status1);
regmap_read(cs35l56->regmap, CS35L56_IRQ1_EINT_8, &status8); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_EINT_8, &status8);
regmap_read(cs35l56->regmap, CS35L56_IRQ1_MASK_8, &mask8); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_MASK_8, &mask8);
status8 &= ~mask8; status8 &= ~mask8;
regmap_write(cs35l56->regmap, CS35L56_IRQ1_EINT_8, status8); regmap_write(cs35l56_base->regmap, CS35L56_IRQ1_EINT_8, status8);
regmap_read(cs35l56->regmap, CS35L56_IRQ1_EINT_20, &status20); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_EINT_20, &status20);
regmap_read(cs35l56->regmap, CS35L56_IRQ1_MASK_20, &mask20); regmap_read(cs35l56_base->regmap, CS35L56_IRQ1_MASK_20, &mask20);
status20 &= ~mask20; status20 &= ~mask20;
/* We don't want EINT20 but they default to unmasked: force mask */ /* We don't want EINT20 but they default to unmasked: force mask */
regmap_write(cs35l56->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff); regmap_write(cs35l56_base->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff);
dev_dbg(cs35l56->dev, "%s: %#x %#x\n", __func__, status1, status8); dev_dbg(cs35l56_base->dev, "%s: %#x %#x\n", __func__, status1, status8);
/* Check to see if unmasked bits are active */ /* Check to see if unmasked bits are active */
if (!status1 && !status8 && !status20) if (!status1 && !status8 && !status20)
goto err; goto err;
if (status1 & CS35L56_AMP_SHORT_ERR_EINT1_MASK) if (status1 & CS35L56_AMP_SHORT_ERR_EINT1_MASK)
dev_crit(cs35l56->dev, "Amp short error\n"); dev_crit(cs35l56_base->dev, "Amp short error\n");
if (status8 & CS35L56_TEMP_ERR_EINT1_MASK) if (status8 & CS35L56_TEMP_ERR_EINT1_MASK)
dev_crit(cs35l56->dev, "Overtemp error\n"); dev_crit(cs35l56_base->dev, "Overtemp error\n");
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
err: err:
pm_runtime_put(cs35l56->dev); pm_runtime_put(cs35l56_base->dev);
err_unlock: err_unlock:
mutex_unlock(&cs35l56->irq_lock); mutex_unlock(&cs35l56_base->irq_lock);
return ret; return ret;
} }
EXPORT_SYMBOL_NS_GPL(cs35l56_irq, SND_SOC_CS35L56_CORE); EXPORT_SYMBOL_NS_GPL(cs35l56_irq, SND_SOC_CS35L56_CORE);
int cs35l56_irq_request(struct cs35l56_private *cs35l56, int irq) int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq)
{ {
int ret; int ret;
if (!irq) if (!irq)
return 0; return 0;
ret = devm_request_threaded_irq(cs35l56->dev, irq, NULL, cs35l56_irq, ret = devm_request_threaded_irq(cs35l56_base->dev, irq, NULL, cs35l56_irq,
IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_LOW, IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_LOW,
"cs35l56", cs35l56); "cs35l56", cs35l56_base);
if (!ret) if (!ret)
cs35l56->irq = irq; cs35l56_base->irq = irq;
else else
dev_err(cs35l56->dev, "Failed to get IRQ: %d\n", ret); dev_err(cs35l56_base->dev, "Failed to get IRQ: %d\n", ret);
return ret; return ret;
} }
...@@ -406,13 +406,13 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f ...@@ -406,13 +406,13 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f
struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(codec_dai->component); struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(codec_dai->component);
unsigned int val; unsigned int val;
dev_dbg(cs35l56->dev, "%s: %#x\n", __func__, fmt); dev_dbg(cs35l56->base.dev, "%s: %#x\n", __func__, fmt);
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
case SND_SOC_DAIFMT_CBC_CFC: case SND_SOC_DAIFMT_CBC_CFC:
break; break;
default: default:
dev_err(cs35l56->dev, "Unsupported clock source mode\n"); dev_err(cs35l56->base.dev, "Unsupported clock source mode\n");
return -EINVAL; return -EINVAL;
} }
...@@ -426,7 +426,7 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f ...@@ -426,7 +426,7 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f
cs35l56->tdm_mode = false; cs35l56->tdm_mode = false;
break; break;
default: default:
dev_err(cs35l56->dev, "Unsupported DAI format\n"); dev_err(cs35l56->base.dev, "Unsupported DAI format\n");
return -EINVAL; return -EINVAL;
} }
...@@ -443,18 +443,18 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f ...@@ -443,18 +443,18 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f
case SND_SOC_DAIFMT_NB_NF: case SND_SOC_DAIFMT_NB_NF:
break; break;
default: default:
dev_err(cs35l56->dev, "Invalid clock invert\n"); dev_err(cs35l56->base.dev, "Invalid clock invert\n");
return -EINVAL; return -EINVAL;
} }
regmap_update_bits(cs35l56->regmap, regmap_update_bits(cs35l56->base.regmap,
CS35L56_ASP1_CONTROL2, CS35L56_ASP1_CONTROL2,
CS35L56_ASP_FMT_MASK | CS35L56_ASP_FMT_MASK |
CS35L56_ASP_BCLK_INV_MASK | CS35L56_ASP_FSYNC_INV_MASK, CS35L56_ASP_BCLK_INV_MASK | CS35L56_ASP_FSYNC_INV_MASK,
val); val);
/* Hi-Z DOUT in unused slots and when all TX are disabled */ /* Hi-Z DOUT in unused slots and when all TX are disabled */
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_CONTROL3, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL3,
CS35L56_ASP1_DOUT_HIZ_CTRL_MASK, CS35L56_ASP1_DOUT_HIZ_CTRL_MASK,
CS35L56_ASP_UNUSED_HIZ_OFF_HIZ); CS35L56_ASP_UNUSED_HIZ_OFF_HIZ);
...@@ -485,7 +485,7 @@ static void cs35l56_set_asp_slot_positions(struct cs35l56_private *cs35l56, ...@@ -485,7 +485,7 @@ static void cs35l56_set_asp_slot_positions(struct cs35l56_private *cs35l56,
channel_shift += 8; channel_shift += 8;
} }
regmap_write(cs35l56->regmap, reg, reg_val); regmap_write(cs35l56->base.regmap, reg, reg_val);
} }
static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
...@@ -494,20 +494,20 @@ static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx ...@@ -494,20 +494,20 @@ static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx
struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component); struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component);
if ((slots == 0) || (slot_width == 0)) { if ((slots == 0) || (slot_width == 0)) {
dev_dbg(cs35l56->dev, "tdm config cleared\n"); dev_dbg(cs35l56->base.dev, "tdm config cleared\n");
cs35l56->asp_slot_width = 0; cs35l56->asp_slot_width = 0;
cs35l56->asp_slot_count = 0; cs35l56->asp_slot_count = 0;
return 0; return 0;
} }
if (slot_width > (CS35L56_ASP_RX_WIDTH_MASK >> CS35L56_ASP_RX_WIDTH_SHIFT)) { if (slot_width > (CS35L56_ASP_RX_WIDTH_MASK >> CS35L56_ASP_RX_WIDTH_SHIFT)) {
dev_err(cs35l56->dev, "tdm invalid slot width %d\n", slot_width); dev_err(cs35l56->base.dev, "tdm invalid slot width %d\n", slot_width);
return -EINVAL; return -EINVAL;
} }
/* More than 32 slots would give an unsupportable BCLK frequency */ /* More than 32 slots would give an unsupportable BCLK frequency */
if (slots > 32) { if (slots > 32) {
dev_err(cs35l56->dev, "tdm invalid slot count %d\n", slots); dev_err(cs35l56->base.dev, "tdm invalid slot count %d\n", slots);
return -EINVAL; return -EINVAL;
} }
...@@ -524,7 +524,7 @@ static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx ...@@ -524,7 +524,7 @@ static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx
cs35l56_set_asp_slot_positions(cs35l56, CS35L56_ASP1_FRAME_CONTROL1, rx_mask); cs35l56_set_asp_slot_positions(cs35l56, CS35L56_ASP1_FRAME_CONTROL1, rx_mask);
cs35l56_set_asp_slot_positions(cs35l56, CS35L56_ASP1_FRAME_CONTROL5, tx_mask); cs35l56_set_asp_slot_positions(cs35l56, CS35L56_ASP1_FRAME_CONTROL5, tx_mask);
dev_dbg(cs35l56->dev, "tdm slot width: %u count: %u tx_mask: %#x rx_mask: %#x\n", dev_dbg(cs35l56->base.dev, "tdm slot width: %u count: %u tx_mask: %#x rx_mask: %#x\n",
cs35l56->asp_slot_width, cs35l56->asp_slot_count, tx_mask, rx_mask); cs35l56->asp_slot_width, cs35l56->asp_slot_count, tx_mask, rx_mask);
return 0; return 0;
...@@ -544,7 +544,8 @@ static int cs35l56_asp_dai_hw_params(struct snd_pcm_substream *substream, ...@@ -544,7 +544,8 @@ static int cs35l56_asp_dai_hw_params(struct snd_pcm_substream *substream,
else else
asp_width = asp_wl; asp_width = asp_wl;
dev_dbg(cs35l56->dev, "%s: wl=%d, width=%d, rate=%d", __func__, asp_wl, asp_width, rate); dev_dbg(cs35l56->base.dev, "%s: wl=%d, width=%d, rate=%d",
__func__, asp_wl, asp_width, rate);
if (!cs35l56->sysclk_set) { if (!cs35l56->sysclk_set) {
unsigned int slots = cs35l56->asp_slot_count; unsigned int slots = cs35l56->asp_slot_count;
...@@ -562,26 +563,26 @@ static int cs35l56_asp_dai_hw_params(struct snd_pcm_substream *substream, ...@@ -562,26 +563,26 @@ static int cs35l56_asp_dai_hw_params(struct snd_pcm_substream *substream,
bclk_freq = asp_width * slots * rate; bclk_freq = asp_width * slots * rate;
freq_id = cs35l56_get_bclk_freq_id(bclk_freq); freq_id = cs35l56_get_bclk_freq_id(bclk_freq);
if (freq_id < 0) { if (freq_id < 0) {
dev_err(cs35l56->dev, "%s: Invalid BCLK %u\n", __func__, bclk_freq); dev_err(cs35l56->base.dev, "%s: Invalid BCLK %u\n", __func__, bclk_freq);
return -EINVAL; return -EINVAL;
} }
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_CONTROL1, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL1,
CS35L56_ASP_BCLK_FREQ_MASK, CS35L56_ASP_BCLK_FREQ_MASK,
freq_id << CS35L56_ASP_BCLK_FREQ_SHIFT); freq_id << CS35L56_ASP_BCLK_FREQ_SHIFT);
} }
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_CONTROL2, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL2,
CS35L56_ASP_RX_WIDTH_MASK, asp_width << CS35L56_ASP_RX_WIDTH_MASK, asp_width <<
CS35L56_ASP_RX_WIDTH_SHIFT); CS35L56_ASP_RX_WIDTH_SHIFT);
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_DATA_CONTROL5, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_DATA_CONTROL5,
CS35L56_ASP_RX_WL_MASK, asp_wl); CS35L56_ASP_RX_WL_MASK, asp_wl);
} else { } else {
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_CONTROL2, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL2,
CS35L56_ASP_TX_WIDTH_MASK, asp_width << CS35L56_ASP_TX_WIDTH_MASK, asp_width <<
CS35L56_ASP_TX_WIDTH_SHIFT); CS35L56_ASP_TX_WIDTH_SHIFT);
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_DATA_CONTROL1, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_DATA_CONTROL1,
CS35L56_ASP_TX_WL_MASK, asp_wl); CS35L56_ASP_TX_WL_MASK, asp_wl);
} }
...@@ -603,7 +604,7 @@ static int cs35l56_asp_dai_set_sysclk(struct snd_soc_dai *dai, ...@@ -603,7 +604,7 @@ static int cs35l56_asp_dai_set_sysclk(struct snd_soc_dai *dai,
if (freq_id < 0) if (freq_id < 0)
return freq_id; return freq_id;
regmap_update_bits(cs35l56->regmap, CS35L56_ASP1_CONTROL1, regmap_update_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL1,
CS35L56_ASP_BCLK_FREQ_MASK, CS35L56_ASP_BCLK_FREQ_MASK,
freq_id << CS35L56_ASP_BCLK_FREQ_SHIFT); freq_id << CS35L56_ASP_BCLK_FREQ_SHIFT);
cs35l56->sysclk_set = true; cs35l56->sysclk_set = true;
...@@ -646,9 +647,9 @@ static int cs35l56_sdw_dai_hw_params(struct snd_pcm_substream *substream, ...@@ -646,9 +647,9 @@ static int cs35l56_sdw_dai_hw_params(struct snd_pcm_substream *substream,
struct sdw_port_config pconfig; struct sdw_port_config pconfig;
int ret; int ret;
dev_dbg(cs35l56->dev, "%s: rate %d\n", __func__, params_rate(params)); dev_dbg(cs35l56->base.dev, "%s: rate %d\n", __func__, params_rate(params));
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return -ENODEV; return -ENODEV;
if (!sdw_stream) if (!sdw_stream)
...@@ -761,30 +762,30 @@ static struct snd_soc_dai_driver cs35l56_dai[] = { ...@@ -761,30 +762,30 @@ static struct snd_soc_dai_driver cs35l56_dai[] = {
} }
}; };
static int cs35l56_wait_for_firmware_boot(struct cs35l56_private *cs35l56) static int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base)
{ {
unsigned int reg; unsigned int reg;
unsigned int val; unsigned int val;
int ret; int ret;
if (cs35l56->rev < CS35L56_REVID_B0) if (cs35l56_base->rev < CS35L56_REVID_B0)
reg = CS35L56_DSP1_HALO_STATE_A1; reg = CS35L56_DSP1_HALO_STATE_A1;
else else
reg = CS35L56_DSP1_HALO_STATE; reg = CS35L56_DSP1_HALO_STATE;
ret = regmap_read_poll_timeout(cs35l56->regmap, reg, ret = regmap_read_poll_timeout(cs35l56_base->regmap, reg,
val, val,
(val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE), (val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE),
CS35L56_HALO_STATE_POLL_US, CS35L56_HALO_STATE_POLL_US,
CS35L56_HALO_STATE_TIMEOUT_US); CS35L56_HALO_STATE_TIMEOUT_US);
if ((ret < 0) && (ret != -ETIMEDOUT)) { if ((ret < 0) && (ret != -ETIMEDOUT)) {
dev_err(cs35l56->dev, "Failed to read HALO_STATE: %d\n", ret); dev_err(cs35l56_base->dev, "Failed to read HALO_STATE: %d\n", ret);
return ret; return ret;
} }
if ((ret == -ETIMEDOUT) || (val != CS35L56_HALO_STATE_BOOT_DONE)) { if ((ret == -ETIMEDOUT) || (val != CS35L56_HALO_STATE_BOOT_DONE)) {
dev_err(cs35l56->dev, "Firmware boot fail: HALO_STATE=%#x\n", val); dev_err(cs35l56_base->dev, "Firmware boot fail: HALO_STATE=%#x\n", val);
return -EIO; return -EIO;
} }
...@@ -809,8 +810,8 @@ static void cs35l56_system_reset(struct cs35l56_private *cs35l56) ...@@ -809,8 +810,8 @@ static void cs35l56_system_reset(struct cs35l56_private *cs35l56)
* Must enter cache-only first so there can't be any more register * Must enter cache-only first so there can't be any more register
* accesses other than the controlled system reset sequence below. * accesses other than the controlled system reset sequence below.
*/ */
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
regmap_multi_reg_write_bypassed(cs35l56->regmap, regmap_multi_reg_write_bypassed(cs35l56->base.regmap,
cs35l56_system_reset_seq, cs35l56_system_reset_seq,
ARRAY_SIZE(cs35l56_system_reset_seq)); ARRAY_SIZE(cs35l56_system_reset_seq));
...@@ -819,7 +820,7 @@ static void cs35l56_system_reset(struct cs35l56_private *cs35l56) ...@@ -819,7 +820,7 @@ static void cs35l56_system_reset(struct cs35l56_private *cs35l56)
return; return;
usleep_range(CS35L56_CONTROL_PORT_READY_US, CS35L56_CONTROL_PORT_READY_US + 400); usleep_range(CS35L56_CONTROL_PORT_READY_US, CS35L56_CONTROL_PORT_READY_US + 400);
regcache_cache_only(cs35l56->regmap, false); regcache_cache_only(cs35l56->base.regmap, false);
} }
static void cs35l56_secure_patch(struct cs35l56_private *cs35l56) static void cs35l56_secure_patch(struct cs35l56_private *cs35l56)
...@@ -829,9 +830,9 @@ static void cs35l56_secure_patch(struct cs35l56_private *cs35l56) ...@@ -829,9 +830,9 @@ static void cs35l56_secure_patch(struct cs35l56_private *cs35l56)
/* Use wm_adsp to load and apply the firmware patch and coefficient files */ /* Use wm_adsp to load and apply the firmware patch and coefficient files */
ret = wm_adsp_power_up(&cs35l56->dsp); ret = wm_adsp_power_up(&cs35l56->dsp);
if (ret) if (ret)
dev_dbg(cs35l56->dev, "%s: wm_adsp_power_up ret %d\n", __func__, ret); dev_dbg(cs35l56->base.dev, "%s: wm_adsp_power_up ret %d\n", __func__, ret);
else else
cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_AUDIO_REINIT); cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_AUDIO_REINIT);
} }
static void cs35l56_patch(struct cs35l56_private *cs35l56) static void cs35l56_patch(struct cs35l56_private *cs35l56)
...@@ -854,31 +855,31 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56) ...@@ -854,31 +855,31 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
flush_work(&cs35l56->sdw_irq_work); flush_work(&cs35l56->sdw_irq_work);
} }
ret = cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_SHUTDOWN); ret = cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_SHUTDOWN);
if (ret) if (ret)
goto err; goto err;
if (cs35l56->rev < CS35L56_REVID_B0) if (cs35l56->base.rev < CS35L56_REVID_B0)
reg = CS35L56_DSP1_PM_CUR_STATE_A1; reg = CS35L56_DSP1_PM_CUR_STATE_A1;
else else
reg = CS35L56_DSP1_PM_CUR_STATE; reg = CS35L56_DSP1_PM_CUR_STATE;
ret = regmap_read_poll_timeout(cs35l56->regmap, reg, ret = regmap_read_poll_timeout(cs35l56->base.regmap, reg,
val, (val == CS35L56_HALO_STATE_SHUTDOWN), val, (val == CS35L56_HALO_STATE_SHUTDOWN),
CS35L56_HALO_STATE_POLL_US, CS35L56_HALO_STATE_POLL_US,
CS35L56_HALO_STATE_TIMEOUT_US); CS35L56_HALO_STATE_TIMEOUT_US);
if (ret < 0) if (ret < 0)
dev_err(cs35l56->dev, "Failed to poll PM_CUR_STATE to 1 is %d (ret %d)\n", dev_err(cs35l56->base.dev, "Failed to poll PM_CUR_STATE to 1 is %d (ret %d)\n",
val, ret); val, ret);
/* Use wm_adsp to load and apply the firmware patch and coefficient files */ /* Use wm_adsp to load and apply the firmware patch and coefficient files */
ret = wm_adsp_power_up(&cs35l56->dsp); ret = wm_adsp_power_up(&cs35l56->dsp);
if (ret) { if (ret) {
dev_dbg(cs35l56->dev, "%s: wm_adsp_power_up ret %d\n", __func__, ret); dev_dbg(cs35l56->base.dev, "%s: wm_adsp_power_up ret %d\n", __func__, ret);
goto err; goto err;
} }
mutex_lock(&cs35l56->irq_lock); mutex_lock(&cs35l56->base.irq_lock);
init_completion(&cs35l56->init_completion); init_completion(&cs35l56->init_completion);
...@@ -892,18 +893,20 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56) ...@@ -892,18 +893,20 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
*/ */
if (!wait_for_completion_timeout(&cs35l56->init_completion, if (!wait_for_completion_timeout(&cs35l56->init_completion,
msecs_to_jiffies(5000))) { msecs_to_jiffies(5000))) {
dev_err(cs35l56->dev, "%s: init_completion timed out (SDW)\n", __func__); dev_err(cs35l56->base.dev, "%s: init_completion timed out (SDW)\n",
__func__);
goto err_unlock; goto err_unlock;
} }
} else if (cs35l56_init(cs35l56)) { } else if (cs35l56_init(cs35l56)) {
goto err_unlock; goto err_unlock;
} }
regmap_clear_bits(cs35l56->regmap, CS35L56_PROTECTION_STATUS, CS35L56_FIRMWARE_MISSING); regmap_clear_bits(cs35l56->base.regmap, CS35L56_PROTECTION_STATUS,
cs35l56->fw_patched = true; CS35L56_FIRMWARE_MISSING);
cs35l56->base.fw_patched = true;
err_unlock: err_unlock:
mutex_unlock(&cs35l56->irq_lock); mutex_unlock(&cs35l56->base.irq_lock);
err: err:
/* Re-enable SoundWire interrupts */ /* Re-enable SoundWire interrupts */
if (cs35l56->sdw_peripheral) { if (cs35l56->sdw_peripheral) {
...@@ -919,10 +922,10 @@ static void cs35l56_dsp_work(struct work_struct *work) ...@@ -919,10 +922,10 @@ static void cs35l56_dsp_work(struct work_struct *work)
struct cs35l56_private, struct cs35l56_private,
dsp_work); dsp_work);
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return; return;
pm_runtime_get_sync(cs35l56->dev); pm_runtime_get_sync(cs35l56->base.dev);
/* /*
* When the device is running in secure mode the firmware files can * When the device is running in secure mode the firmware files can
...@@ -930,13 +933,13 @@ static void cs35l56_dsp_work(struct work_struct *work) ...@@ -930,13 +933,13 @@ static void cs35l56_dsp_work(struct work_struct *work)
* shutdown the firmware to apply them and can use the lower cost * shutdown the firmware to apply them and can use the lower cost
* reinit sequence instead. * reinit sequence instead.
*/ */
if (cs35l56->secured) if (cs35l56->base.secured)
cs35l56_secure_patch(cs35l56); cs35l56_secure_patch(cs35l56);
else else
cs35l56_patch(cs35l56); cs35l56_patch(cs35l56);
pm_runtime_mark_last_busy(cs35l56->dev); pm_runtime_mark_last_busy(cs35l56->base.dev);
pm_runtime_put_autosuspend(cs35l56->dev); pm_runtime_put_autosuspend(cs35l56->base.dev);
} }
static int cs35l56_component_probe(struct snd_soc_component *component) static int cs35l56_component_probe(struct snd_soc_component *component)
...@@ -948,16 +951,16 @@ static int cs35l56_component_probe(struct snd_soc_component *component) ...@@ -948,16 +951,16 @@ static int cs35l56_component_probe(struct snd_soc_component *component)
if (!wait_for_completion_timeout(&cs35l56->init_completion, if (!wait_for_completion_timeout(&cs35l56->init_completion,
msecs_to_jiffies(5000))) { msecs_to_jiffies(5000))) {
dev_err(cs35l56->dev, "%s: init_completion timed out\n", __func__); dev_err(cs35l56->base.dev, "%s: init_completion timed out\n", __func__);
return -ENODEV; return -ENODEV;
} }
cs35l56->component = component; cs35l56->component = component;
wm_adsp2_component_probe(&cs35l56->dsp, component); wm_adsp2_component_probe(&cs35l56->dsp, component);
debugfs_create_bool("init_done", 0444, debugfs_root, &cs35l56->init_done); debugfs_create_bool("init_done", 0444, debugfs_root, &cs35l56->base.init_done);
debugfs_create_bool("can_hibernate", 0444, debugfs_root, &cs35l56->can_hibernate); debugfs_create_bool("can_hibernate", 0444, debugfs_root, &cs35l56->base.can_hibernate);
debugfs_create_bool("fw_patched", 0444, debugfs_root, &cs35l56->fw_patched); debugfs_create_bool("fw_patched", 0444, debugfs_root, &cs35l56->base.fw_patched);
queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work); queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work);
...@@ -1024,23 +1027,23 @@ int cs35l56_runtime_suspend(struct device *dev) ...@@ -1024,23 +1027,23 @@ int cs35l56_runtime_suspend(struct device *dev)
unsigned int val; unsigned int val;
int ret; int ret;
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return 0; return 0;
/* Firmware must have entered a power-save state */ /* Firmware must have entered a power-save state */
ret = regmap_read_poll_timeout(cs35l56->regmap, ret = regmap_read_poll_timeout(cs35l56->base.regmap,
CS35L56_TRANSDUCER_ACTUAL_PS, CS35L56_TRANSDUCER_ACTUAL_PS,
val, (val >= CS35L56_PS3), val, (val >= CS35L56_PS3),
CS35L56_PS3_POLL_US, CS35L56_PS3_POLL_US,
CS35L56_PS3_TIMEOUT_US); CS35L56_PS3_TIMEOUT_US);
if (ret) if (ret)
dev_warn(cs35l56->dev, "PS3 wait failed: %d\n", ret); dev_warn(cs35l56->base.dev, "PS3 wait failed: %d\n", ret);
/* Clear BOOT_DONE so it can be used to detect a reboot */ /* Clear BOOT_DONE so it can be used to detect a reboot */
regmap_write(cs35l56->regmap, CS35L56_IRQ1_EINT_4, CS35L56_OTP_BOOT_DONE_MASK); regmap_write(cs35l56->base.regmap, CS35L56_IRQ1_EINT_4, CS35L56_OTP_BOOT_DONE_MASK);
if (!cs35l56->can_hibernate) { if (!cs35l56->base.can_hibernate) {
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
dev_dbg(dev, "Suspended: no hibernate"); dev_dbg(dev, "Suspended: no hibernate");
return 0; return 0;
...@@ -1050,15 +1053,15 @@ int cs35l56_runtime_suspend(struct device *dev) ...@@ -1050,15 +1053,15 @@ int cs35l56_runtime_suspend(struct device *dev)
* Enable auto-hibernate. If it is woken by some other wake source * Enable auto-hibernate. If it is woken by some other wake source
* it will automatically return to hibernate. * it will automatically return to hibernate.
*/ */
cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_ALLOW_AUTO_HIBERNATE); cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_ALLOW_AUTO_HIBERNATE);
/* /*
* Must enter cache-only first so there can't be any more register * Must enter cache-only first so there can't be any more register
* accesses other than the controlled hibernate sequence below. * accesses other than the controlled hibernate sequence below.
*/ */
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
regmap_multi_reg_write_bypassed(cs35l56->regmap, regmap_multi_reg_write_bypassed(cs35l56->base.regmap,
cs35l56_hibernate_seq, cs35l56_hibernate_seq,
ARRAY_SIZE(cs35l56_hibernate_seq)); ARRAY_SIZE(cs35l56_hibernate_seq));
...@@ -1072,7 +1075,7 @@ static int __maybe_unused cs35l56_runtime_resume_i2c_spi(struct device *dev) ...@@ -1072,7 +1075,7 @@ static int __maybe_unused cs35l56_runtime_resume_i2c_spi(struct device *dev)
{ {
struct cs35l56_private *cs35l56 = dev_get_drvdata(dev); struct cs35l56_private *cs35l56 = dev_get_drvdata(dev);
if (!cs35l56->init_done) if (!cs35l56->base.init_done)
return 0; return 0;
return cs35l56_runtime_resume_common(cs35l56); return cs35l56_runtime_resume_common(cs35l56);
...@@ -1083,7 +1086,7 @@ int cs35l56_runtime_resume_common(struct cs35l56_private *cs35l56) ...@@ -1083,7 +1086,7 @@ int cs35l56_runtime_resume_common(struct cs35l56_private *cs35l56)
unsigned int val; unsigned int val;
int ret; int ret;
if (!cs35l56->can_hibernate) if (!cs35l56->base.can_hibernate)
goto out_sync; goto out_sync;
if (!cs35l56->sdw_peripheral) { if (!cs35l56->sdw_peripheral) {
...@@ -1091,7 +1094,7 @@ int cs35l56_runtime_resume_common(struct cs35l56_private *cs35l56) ...@@ -1091,7 +1094,7 @@ int cs35l56_runtime_resume_common(struct cs35l56_private *cs35l56)
* Dummy transaction to trigger I2C/SPI auto-wake. This will NAK on I2C. * Dummy transaction to trigger I2C/SPI auto-wake. This will NAK on I2C.
* Must be done before releasing cache-only. * Must be done before releasing cache-only.
*/ */
regmap_multi_reg_write_bypassed(cs35l56->regmap, regmap_multi_reg_write_bypassed(cs35l56->base.regmap,
cs35l56_hibernate_wake_seq, cs35l56_hibernate_wake_seq,
ARRAY_SIZE(cs35l56_hibernate_wake_seq)); ARRAY_SIZE(cs35l56_hibernate_wake_seq));
...@@ -1100,36 +1103,36 @@ int cs35l56_runtime_resume_common(struct cs35l56_private *cs35l56) ...@@ -1100,36 +1103,36 @@ int cs35l56_runtime_resume_common(struct cs35l56_private *cs35l56)
} }
out_sync: out_sync:
regcache_cache_only(cs35l56->regmap, false); regcache_cache_only(cs35l56->base.regmap, false);
ret = cs35l56_wait_for_firmware_boot(cs35l56); ret = cs35l56_wait_for_firmware_boot(&cs35l56->base);
if (ret) { if (ret) {
dev_err(cs35l56->dev, "Hibernate wake failed: %d\n", ret); dev_err(cs35l56->base.dev, "Hibernate wake failed: %d\n", ret);
goto err; goto err;
} }
ret = cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE); ret = cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE);
if (ret) if (ret)
goto err; goto err;
/* BOOT_DONE will be 1 if the amp reset */ /* BOOT_DONE will be 1 if the amp reset */
regmap_read(cs35l56->regmap, CS35L56_IRQ1_EINT_4, &val); regmap_read(cs35l56->base.regmap, CS35L56_IRQ1_EINT_4, &val);
if (val & CS35L56_OTP_BOOT_DONE_MASK) { if (val & CS35L56_OTP_BOOT_DONE_MASK) {
dev_dbg(cs35l56->dev, "Registers reset in suspend\n"); dev_dbg(cs35l56->base.dev, "Registers reset in suspend\n");
regcache_mark_dirty(cs35l56->regmap); regcache_mark_dirty(cs35l56->base.regmap);
} }
regcache_sync(cs35l56->regmap); regcache_sync(cs35l56->base.regmap);
dev_dbg(cs35l56->dev, "Resumed"); dev_dbg(cs35l56->base.dev, "Resumed");
return 0; return 0;
err: err:
regmap_write(cs35l56->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, regmap_write(cs35l56->base.regmap, CS35L56_DSP_VIRTUAL1_MBOX_1,
CS35L56_MBOX_CMD_HIBERNATE_NOW); CS35L56_MBOX_CMD_HIBERNATE_NOW);
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
return ret; return ret;
} }
...@@ -1141,14 +1144,14 @@ static int cs35l56_is_fw_reload_needed(struct cs35l56_private *cs35l56) ...@@ -1141,14 +1144,14 @@ static int cs35l56_is_fw_reload_needed(struct cs35l56_private *cs35l56)
int ret; int ret;
/* Nothing to re-patch if we haven't done any patching yet. */ /* Nothing to re-patch if we haven't done any patching yet. */
if (!cs35l56->fw_patched) if (!cs35l56->base.fw_patched)
return false; return false;
/* /*
* If we have control of RESET we will have asserted it so the firmware * If we have control of RESET we will have asserted it so the firmware
* will need re-patching. * will need re-patching.
*/ */
if (cs35l56->reset_gpio) if (cs35l56->base.reset_gpio)
return true; return true;
/* /*
...@@ -1156,22 +1159,22 @@ static int cs35l56_is_fw_reload_needed(struct cs35l56_private *cs35l56) ...@@ -1156,22 +1159,22 @@ static int cs35l56_is_fw_reload_needed(struct cs35l56_private *cs35l56)
* can't be used here to test for memory retention. * can't be used here to test for memory retention.
* Assume that tuning must be re-loaded. * Assume that tuning must be re-loaded.
*/ */
if (cs35l56->secured) if (cs35l56->base.secured)
return true; return true;
ret = pm_runtime_resume_and_get(cs35l56->dev); ret = pm_runtime_resume_and_get(cs35l56->base.dev);
if (ret) { if (ret) {
dev_err(cs35l56->dev, "Failed to runtime_get: %d\n", ret); dev_err(cs35l56->base.dev, "Failed to runtime_get: %d\n", ret);
return ret; return ret;
} }
ret = regmap_read(cs35l56->regmap, CS35L56_PROTECTION_STATUS, &val); ret = regmap_read(cs35l56->base.regmap, CS35L56_PROTECTION_STATUS, &val);
if (ret) if (ret)
dev_err(cs35l56->dev, "Failed to read PROTECTION_STATUS: %d\n", ret); dev_err(cs35l56->base.dev, "Failed to read PROTECTION_STATUS: %d\n", ret);
else else
ret = !!(val & CS35L56_FIRMWARE_MISSING); ret = !!(val & CS35L56_FIRMWARE_MISSING);
pm_runtime_put_autosuspend(cs35l56->dev); pm_runtime_put_autosuspend(cs35l56->base.dev);
return ret; return ret;
} }
...@@ -1191,8 +1194,8 @@ int cs35l56_system_suspend(struct device *dev) ...@@ -1191,8 +1194,8 @@ int cs35l56_system_suspend(struct device *dev)
* clear it. Prevent this race by temporarily disabling the parent irq * clear it. Prevent this race by temporarily disabling the parent irq
* until we reach _no_irq. * until we reach _no_irq.
*/ */
if (cs35l56->irq) if (cs35l56->base.irq)
disable_irq(cs35l56->irq); disable_irq(cs35l56->base.irq);
return pm_runtime_force_suspend(dev); return pm_runtime_force_suspend(dev);
} }
...@@ -1209,8 +1212,8 @@ int cs35l56_system_suspend_late(struct device *dev) ...@@ -1209,8 +1212,8 @@ int cs35l56_system_suspend_late(struct device *dev)
* RESET is usually shared by all amps so it must not be asserted until * RESET is usually shared by all amps so it must not be asserted until
* all driver instances have done their suspend() stage. * all driver instances have done their suspend() stage.
*/ */
if (cs35l56->reset_gpio) { if (cs35l56->base.reset_gpio) {
gpiod_set_value_cansleep(cs35l56->reset_gpio, 0); gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
cs35l56_wait_min_reset_pulse(); cs35l56_wait_min_reset_pulse();
} }
...@@ -1227,8 +1230,8 @@ int cs35l56_system_suspend_no_irq(struct device *dev) ...@@ -1227,8 +1230,8 @@ int cs35l56_system_suspend_no_irq(struct device *dev)
dev_dbg(dev, "system_suspend_no_irq\n"); dev_dbg(dev, "system_suspend_no_irq\n");
/* Handlers are now disabled so the parent IRQ can safely be re-enabled. */ /* Handlers are now disabled so the parent IRQ can safely be re-enabled. */
if (cs35l56->irq) if (cs35l56->base.irq)
enable_irq(cs35l56->irq); enable_irq(cs35l56->base.irq);
return 0; return 0;
} }
...@@ -1247,8 +1250,8 @@ int cs35l56_system_resume_no_irq(struct device *dev) ...@@ -1247,8 +1250,8 @@ int cs35l56_system_resume_no_irq(struct device *dev)
* clear it, until it has fully resumed. Prevent this race by temporarily * clear it, until it has fully resumed. Prevent this race by temporarily
* disabling the parent irq until we complete resume(). * disabling the parent irq until we complete resume().
*/ */
if (cs35l56->irq) if (cs35l56->base.irq)
disable_irq(cs35l56->irq); disable_irq(cs35l56->base.irq);
return 0; return 0;
} }
...@@ -1262,8 +1265,8 @@ int cs35l56_system_resume_early(struct device *dev) ...@@ -1262,8 +1265,8 @@ int cs35l56_system_resume_early(struct device *dev)
dev_dbg(dev, "system_resume_early\n"); dev_dbg(dev, "system_resume_early\n");
/* Ensure a spec-compliant RESET pulse. */ /* Ensure a spec-compliant RESET pulse. */
if (cs35l56->reset_gpio) { if (cs35l56->base.reset_gpio) {
gpiod_set_value_cansleep(cs35l56->reset_gpio, 0); gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
cs35l56_wait_min_reset_pulse(); cs35l56_wait_min_reset_pulse();
} }
...@@ -1275,7 +1278,7 @@ int cs35l56_system_resume_early(struct device *dev) ...@@ -1275,7 +1278,7 @@ int cs35l56_system_resume_early(struct device *dev)
} }
/* Release shared RESET before drivers start resume(). */ /* Release shared RESET before drivers start resume(). */
gpiod_set_value_cansleep(cs35l56->reset_gpio, 1); gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 1);
return 0; return 0;
} }
...@@ -1290,8 +1293,8 @@ int cs35l56_system_resume(struct device *dev) ...@@ -1290,8 +1293,8 @@ int cs35l56_system_resume(struct device *dev)
/* Undo pm_runtime_force_suspend() before re-enabling the irq */ /* Undo pm_runtime_force_suspend() before re-enabling the irq */
ret = pm_runtime_force_resume(dev); ret = pm_runtime_force_resume(dev);
if (cs35l56->irq) if (cs35l56->base.irq)
enable_irq(cs35l56->irq); enable_irq(cs35l56->base.irq);
if (ret) if (ret)
return ret; return ret;
...@@ -1301,11 +1304,11 @@ int cs35l56_system_resume(struct device *dev) ...@@ -1301,11 +1304,11 @@ int cs35l56_system_resume(struct device *dev)
return 0; return 0;
ret = cs35l56_is_fw_reload_needed(cs35l56); ret = cs35l56_is_fw_reload_needed(cs35l56);
dev_dbg(cs35l56->dev, "fw_reload_needed: %d\n", ret); dev_dbg(cs35l56->base.dev, "fw_reload_needed: %d\n", ret);
if (ret < 1) if (ret < 1)
return ret; return ret;
cs35l56->fw_patched = false; cs35l56->base.fw_patched = false;
queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work); queue_work(cs35l56->dsp_wq, &cs35l56->dsp_work);
/* /*
...@@ -1334,8 +1337,8 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) ...@@ -1334,8 +1337,8 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56)
dsp->cs_dsp.type = WMFW_HALO; dsp->cs_dsp.type = WMFW_HALO;
dsp->cs_dsp.rev = 0; dsp->cs_dsp.rev = 0;
dsp->fw = 12; dsp->fw = 12;
dsp->cs_dsp.dev = cs35l56->dev; dsp->cs_dsp.dev = cs35l56->base.dev;
dsp->cs_dsp.regmap = cs35l56->regmap; dsp->cs_dsp.regmap = cs35l56->base.regmap;
dsp->cs_dsp.base = CS35L56_DSP1_CORE_BASE; dsp->cs_dsp.base = CS35L56_DSP1_CORE_BASE;
dsp->cs_dsp.base_sysinfo = CS35L56_DSP1_SYS_INFO_ID; dsp->cs_dsp.base_sysinfo = CS35L56_DSP1_SYS_INFO_ID;
dsp->cs_dsp.mem = cs35l56_dsp1_regions; dsp->cs_dsp.mem = cs35l56_dsp1_regions;
...@@ -1343,11 +1346,11 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) ...@@ -1343,11 +1346,11 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56)
dsp->cs_dsp.no_core_startstop = true; dsp->cs_dsp.no_core_startstop = true;
dsp->wmfw_optional = true; dsp->wmfw_optional = true;
dev_dbg(cs35l56->dev, "DSP system name: '%s'\n", dsp->system_name); dev_dbg(cs35l56->base.dev, "DSP system name: '%s'\n", dsp->system_name);
ret = wm_halo_init(dsp); ret = wm_halo_init(dsp);
if (ret != 0) { if (ret != 0) {
dev_err(cs35l56->dev, "wm_halo_init failed\n"); dev_err(cs35l56->base.dev, "wm_halo_init failed\n");
return ret; return ret;
} }
...@@ -1356,7 +1359,7 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56) ...@@ -1356,7 +1359,7 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56)
static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56) static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56)
{ {
acpi_handle handle = ACPI_HANDLE(cs35l56->dev); acpi_handle handle = ACPI_HANDLE(cs35l56->base.dev);
const char *sub; const char *sub;
/* If there is no ACPI_HANDLE, there is no ACPI for this system, return 0 */ /* If there is no ACPI_HANDLE, there is no ACPI for this system, return 0 */
...@@ -1373,7 +1376,7 @@ static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56) ...@@ -1373,7 +1376,7 @@ static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56)
} }
cs35l56->dsp.system_name = sub; cs35l56->dsp.system_name = sub;
dev_dbg(cs35l56->dev, "Subsystem ID: %s\n", cs35l56->dsp.system_name); dev_dbg(cs35l56->base.dev, "Subsystem ID: %s\n", cs35l56->dsp.system_name);
return 0; return 0;
} }
...@@ -1383,38 +1386,39 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) ...@@ -1383,38 +1386,39 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
int ret; int ret;
init_completion(&cs35l56->init_completion); init_completion(&cs35l56->init_completion);
mutex_init(&cs35l56->irq_lock); mutex_init(&cs35l56->base.irq_lock);
dev_set_drvdata(cs35l56->dev, cs35l56); dev_set_drvdata(cs35l56->base.dev, cs35l56);
cs35l56_fill_supply_names(cs35l56->supplies); cs35l56_fill_supply_names(cs35l56->supplies);
ret = devm_regulator_bulk_get(cs35l56->dev, ARRAY_SIZE(cs35l56->supplies), ret = devm_regulator_bulk_get(cs35l56->base.dev, ARRAY_SIZE(cs35l56->supplies),
cs35l56->supplies); cs35l56->supplies);
if (ret != 0) if (ret != 0)
return dev_err_probe(cs35l56->dev, ret, "Failed to request supplies\n"); return dev_err_probe(cs35l56->base.dev, ret, "Failed to request supplies\n");
/* Reset could be controlled by the BIOS or shared by multiple amps */ /* Reset could be controlled by the BIOS or shared by multiple amps */
cs35l56->reset_gpio = devm_gpiod_get_optional(cs35l56->dev, "reset", GPIOD_OUT_LOW); cs35l56->base.reset_gpio = devm_gpiod_get_optional(cs35l56->base.dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(cs35l56->reset_gpio)) { if (IS_ERR(cs35l56->base.reset_gpio)) {
ret = PTR_ERR(cs35l56->reset_gpio); ret = PTR_ERR(cs35l56->base.reset_gpio);
/* /*
* If RESET is shared the first amp to probe will grab the reset * If RESET is shared the first amp to probe will grab the reset
* line and reset all the amps * line and reset all the amps
*/ */
if (ret != -EBUSY) if (ret != -EBUSY)
return dev_err_probe(cs35l56->dev, ret, "Failed to get reset GPIO\n"); return dev_err_probe(cs35l56->base.dev, ret, "Failed to get reset GPIO\n");
dev_info(cs35l56->dev, "Reset GPIO busy, assume shared reset\n"); dev_info(cs35l56->base.dev, "Reset GPIO busy, assume shared reset\n");
cs35l56->reset_gpio = NULL; cs35l56->base.reset_gpio = NULL;
} }
ret = regulator_bulk_enable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies); ret = regulator_bulk_enable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies);
if (ret != 0) if (ret != 0)
return dev_err_probe(cs35l56->dev, ret, "Failed to enable supplies\n"); return dev_err_probe(cs35l56->base.dev, ret, "Failed to enable supplies\n");
if (cs35l56->reset_gpio) { if (cs35l56->base.reset_gpio) {
cs35l56_wait_min_reset_pulse(); cs35l56_wait_min_reset_pulse();
gpiod_set_value_cansleep(cs35l56->reset_gpio, 1); gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 1);
} }
ret = cs35l56_acpi_get_name(cs35l56); ret = cs35l56_acpi_get_name(cs35l56);
...@@ -1423,22 +1427,22 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56) ...@@ -1423,22 +1427,22 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
ret = cs35l56_dsp_init(cs35l56); ret = cs35l56_dsp_init(cs35l56);
if (ret < 0) { if (ret < 0) {
dev_err_probe(cs35l56->dev, ret, "DSP init failed\n"); dev_err_probe(cs35l56->base.dev, ret, "DSP init failed\n");
goto err; goto err;
} }
ret = devm_snd_soc_register_component(cs35l56->dev, ret = devm_snd_soc_register_component(cs35l56->base.dev,
&soc_component_dev_cs35l56, &soc_component_dev_cs35l56,
cs35l56_dai, ARRAY_SIZE(cs35l56_dai)); cs35l56_dai, ARRAY_SIZE(cs35l56_dai));
if (ret < 0) { if (ret < 0) {
dev_err_probe(cs35l56->dev, ret, "Register codec failed\n"); dev_err_probe(cs35l56->base.dev, ret, "Register codec failed\n");
goto err; goto err;
} }
return 0; return 0;
err: err:
gpiod_set_value_cansleep(cs35l56->reset_gpio, 0); gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies); regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies);
return ret; return ret;
...@@ -1457,20 +1461,20 @@ int cs35l56_init(struct cs35l56_private *cs35l56) ...@@ -1457,20 +1461,20 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
if (cs35l56->soft_resetting) if (cs35l56->soft_resetting)
goto post_soft_reset; goto post_soft_reset;
if (cs35l56->init_done) if (cs35l56->base.init_done)
return 0; return 0;
pm_runtime_set_autosuspend_delay(cs35l56->dev, 100); pm_runtime_set_autosuspend_delay(cs35l56->base.dev, 100);
pm_runtime_use_autosuspend(cs35l56->dev); pm_runtime_use_autosuspend(cs35l56->base.dev);
pm_runtime_set_active(cs35l56->dev); pm_runtime_set_active(cs35l56->base.dev);
pm_runtime_enable(cs35l56->dev); pm_runtime_enable(cs35l56->base.dev);
/* /*
* If the system is not using a reset_gpio then issue a * If the system is not using a reset_gpio then issue a
* dummy read to force a wakeup. * dummy read to force a wakeup.
*/ */
if (!cs35l56->reset_gpio) if (!cs35l56->base.reset_gpio)
regmap_read(cs35l56->regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, &devid); regmap_read(cs35l56->base.regmap, CS35L56_DSP_VIRTUAL1_MBOX_1, &devid);
/* Wait for control port to be ready (datasheet tIRS). */ /* Wait for control port to be ready (datasheet tIRS). */
usleep_range(CS35L56_CONTROL_PORT_READY_US, usleep_range(CS35L56_CONTROL_PORT_READY_US,
...@@ -1481,20 +1485,20 @@ int cs35l56_init(struct cs35l56_private *cs35l56) ...@@ -1481,20 +1485,20 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
* devices so the REVID needs to be determined before waiting for the * devices so the REVID needs to be determined before waiting for the
* firmware to boot. * firmware to boot.
*/ */
ret = regmap_read(cs35l56->regmap, CS35L56_REVID, &revid); ret = regmap_read(cs35l56->base.regmap, CS35L56_REVID, &revid);
if (ret < 0) { if (ret < 0) {
dev_err(cs35l56->dev, "Get Revision ID failed\n"); dev_err(cs35l56->base.dev, "Get Revision ID failed\n");
return ret; return ret;
} }
cs35l56->rev = revid & (CS35L56_AREVID_MASK | CS35L56_MTLREVID_MASK); cs35l56->base.rev = revid & (CS35L56_AREVID_MASK | CS35L56_MTLREVID_MASK);
ret = cs35l56_wait_for_firmware_boot(cs35l56); ret = cs35l56_wait_for_firmware_boot(&cs35l56->base);
if (ret) if (ret)
return ret; return ret;
ret = regmap_read(cs35l56->regmap, CS35L56_DEVID, &devid); ret = regmap_read(cs35l56->base.regmap, CS35L56_DEVID, &devid);
if (ret < 0) { if (ret < 0) {
dev_err(cs35l56->dev, "Get Device ID failed\n"); dev_err(cs35l56->base.dev, "Get Device ID failed\n");
return ret; return ret;
} }
devid &= CS35L56_DEVID_MASK; devid &= CS35L56_DEVID_MASK;
...@@ -1503,50 +1507,50 @@ int cs35l56_init(struct cs35l56_private *cs35l56) ...@@ -1503,50 +1507,50 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
case 0x35A56: case 0x35A56:
break; break;
default: default:
dev_err(cs35l56->dev, "Unknown device %x\n", devid); dev_err(cs35l56->base.dev, "Unknown device %x\n", devid);
return ret; return ret;
} }
ret = regmap_read(cs35l56->regmap, CS35L56_DSP_RESTRICT_STS1, &secured); ret = regmap_read(cs35l56->base.regmap, CS35L56_DSP_RESTRICT_STS1, &secured);
if (ret) { if (ret) {
dev_err(cs35l56->dev, "Get Secure status failed\n"); dev_err(cs35l56->base.dev, "Get Secure status failed\n");
return ret; return ret;
} }
/* When any bus is restricted treat the device as secured */ /* When any bus is restricted treat the device as secured */
if (secured & CS35L56_RESTRICTED_MASK) if (secured & CS35L56_RESTRICTED_MASK)
cs35l56->secured = true; cs35l56->base.secured = true;
ret = regmap_read(cs35l56->regmap, CS35L56_OTPID, &otpid); ret = regmap_read(cs35l56->base.regmap, CS35L56_OTPID, &otpid);
if (ret < 0) { if (ret < 0) {
dev_err(cs35l56->dev, "Get OTP ID failed\n"); dev_err(cs35l56->base.dev, "Get OTP ID failed\n");
return ret; return ret;
} }
dev_info(cs35l56->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d\n", dev_info(cs35l56->base.dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d\n",
cs35l56->secured ? "s" : "", cs35l56->rev, otpid); cs35l56->base.secured ? "s" : "", cs35l56->base.rev, otpid);
/* Populate the DSP information with the revision and security state */ /* Populate the DSP information with the revision and security state */
cs35l56->dsp.part = devm_kasprintf(cs35l56->dev, GFP_KERNEL, "cs35l56%s-%02x", cs35l56->dsp.part = devm_kasprintf(cs35l56->base.dev, GFP_KERNEL, "cs35l56%s-%02x",
cs35l56->secured ? "s" : "", cs35l56->rev); cs35l56->base.secured ? "s" : "", cs35l56->base.rev);
if (!cs35l56->dsp.part) if (!cs35l56->dsp.part)
return -ENOMEM; return -ENOMEM;
/* Wake source and *_BLOCKED interrupts default to unmasked, so mask them */ /* Wake source and *_BLOCKED interrupts default to unmasked, so mask them */
regmap_write(cs35l56->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff); regmap_write(cs35l56->base.regmap, CS35L56_IRQ1_MASK_20, 0xffffffff);
regmap_update_bits(cs35l56->regmap, CS35L56_IRQ1_MASK_1, regmap_update_bits(cs35l56->base.regmap, CS35L56_IRQ1_MASK_1,
CS35L56_AMP_SHORT_ERR_EINT1_MASK, CS35L56_AMP_SHORT_ERR_EINT1_MASK,
0); 0);
regmap_update_bits(cs35l56->regmap, CS35L56_IRQ1_MASK_8, regmap_update_bits(cs35l56->base.regmap, CS35L56_IRQ1_MASK_8,
CS35L56_TEMP_ERR_EINT1_MASK, CS35L56_TEMP_ERR_EINT1_MASK,
0); 0);
if (!cs35l56->reset_gpio) { if (!cs35l56->base.reset_gpio) {
dev_dbg(cs35l56->dev, "No reset gpio: using soft reset\n"); dev_dbg(cs35l56->base.dev, "No reset gpio: using soft reset\n");
cs35l56_system_reset(cs35l56); cs35l56_system_reset(cs35l56);
if (cs35l56->sdw_peripheral) { if (cs35l56->sdw_peripheral) {
/* Keep alive while we wait for re-enumeration */ /* Keep alive while we wait for re-enumeration */
pm_runtime_get_noresume(cs35l56->dev); pm_runtime_get_noresume(cs35l56->base.dev);
return 0; return 0;
} }
} }
...@@ -1556,30 +1560,30 @@ int cs35l56_init(struct cs35l56_private *cs35l56) ...@@ -1556,30 +1560,30 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
cs35l56->soft_resetting = false; cs35l56->soft_resetting = false;
/* Done re-enumerating after one-time init so release the keep-alive */ /* Done re-enumerating after one-time init so release the keep-alive */
if (cs35l56->sdw_peripheral && !cs35l56->init_done) if (cs35l56->sdw_peripheral && !cs35l56->base.init_done)
pm_runtime_put_noidle(cs35l56->dev); pm_runtime_put_noidle(cs35l56->base.dev);
regcache_mark_dirty(cs35l56->regmap); regcache_mark_dirty(cs35l56->base.regmap);
ret = cs35l56_wait_for_firmware_boot(cs35l56); ret = cs35l56_wait_for_firmware_boot(&cs35l56->base);
if (ret) if (ret)
return ret; return ret;
dev_dbg(cs35l56->dev, "Firmware rebooted after soft reset\n"); dev_dbg(cs35l56->base.dev, "Firmware rebooted after soft reset\n");
} }
/* Disable auto-hibernate so that runtime_pm has control */ /* Disable auto-hibernate so that runtime_pm has control */
ret = cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE); ret = cs35l56_mbox_send(&cs35l56->base, CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE);
if (ret) if (ret)
return ret; return ret;
ret = cs35l56_set_patch(cs35l56->regmap); ret = cs35l56_set_patch(&cs35l56->base);
if (ret) if (ret)
return ret; return ret;
/* Registers could be dirty after soft reset or SoundWire enumeration */ /* Registers could be dirty after soft reset or SoundWire enumeration */
regcache_sync(cs35l56->regmap); regcache_sync(cs35l56->base.regmap);
cs35l56->init_done = true; cs35l56->base.init_done = true;
complete(&cs35l56->init_completion); complete(&cs35l56->init_completion);
return 0; return 0;
...@@ -1588,26 +1592,26 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_init, SND_SOC_CS35L56_CORE); ...@@ -1588,26 +1592,26 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_init, SND_SOC_CS35L56_CORE);
void cs35l56_remove(struct cs35l56_private *cs35l56) void cs35l56_remove(struct cs35l56_private *cs35l56)
{ {
cs35l56->init_done = false; cs35l56->base.init_done = false;
/* /*
* WAKE IRQs unmask if CS35L56 hibernates so free the handler to * WAKE IRQs unmask if CS35L56 hibernates so free the handler to
* prevent it racing with remove(). * prevent it racing with remove().
*/ */
if (cs35l56->irq) if (cs35l56->base.irq)
devm_free_irq(cs35l56->dev, cs35l56->irq, cs35l56); devm_free_irq(cs35l56->base.dev, cs35l56->base.irq, &cs35l56->base);
flush_workqueue(cs35l56->dsp_wq); flush_workqueue(cs35l56->dsp_wq);
destroy_workqueue(cs35l56->dsp_wq); destroy_workqueue(cs35l56->dsp_wq);
pm_runtime_suspend(cs35l56->dev); pm_runtime_suspend(cs35l56->base.dev);
pm_runtime_disable(cs35l56->dev); pm_runtime_disable(cs35l56->base.dev);
regcache_cache_only(cs35l56->regmap, true); regcache_cache_only(cs35l56->base.regmap, true);
kfree(cs35l56->dsp.system_name); kfree(cs35l56->dsp.system_name);
gpiod_set_value_cansleep(cs35l56->reset_gpio, 0); gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies); regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies);
} }
EXPORT_SYMBOL_NS_GPL(cs35l56_remove, SND_SOC_CS35L56_CORE); EXPORT_SYMBOL_NS_GPL(cs35l56_remove, SND_SOC_CS35L56_CORE);
......
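With the shared members now living in cs35l56->base, the IRQ dev_id in remove() above becomes &cs35l56->base rather than the private struct, so shared code sees a struct cs35l56_base pointer and ASoC-specific code can recover its wrapper with container_of(). A minimal sketch; the helper name is illustrative and not from this commit:

#include <linux/container_of.h>

/* Illustrative helper only: maps the shared base pointer (for example the
 * IRQ dev_id &cs35l56->base) back to the ASoC wrapper struct that embeds it.
 */
static inline struct cs35l56_private *
cs35l56_example_to_private(struct cs35l56_base *cs35l56_base)
{
	return container_of(cs35l56_base, struct cs35l56_private, base);
}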
...@@ -32,26 +32,17 @@ struct sdw_slave; ...@@ -32,26 +32,17 @@ struct sdw_slave;
struct cs35l56_private { struct cs35l56_private {
struct wm_adsp dsp; /* must be first member */ struct wm_adsp dsp; /* must be first member */
struct cs35l56_base base;
struct work_struct dsp_work; struct work_struct dsp_work;
struct workqueue_struct *dsp_wq; struct workqueue_struct *dsp_wq;
struct mutex irq_lock;
struct snd_soc_component *component; struct snd_soc_component *component;
struct device *dev;
struct regmap *regmap;
struct regulator_bulk_data supplies[CS35L56_NUM_BULK_SUPPLIES]; struct regulator_bulk_data supplies[CS35L56_NUM_BULK_SUPPLIES];
int irq;
struct sdw_slave *sdw_peripheral; struct sdw_slave *sdw_peripheral;
u8 rev;
struct work_struct sdw_irq_work; struct work_struct sdw_irq_work;
bool secured;
bool sdw_irq_no_unmask; bool sdw_irq_no_unmask;
bool soft_resetting; bool soft_resetting;
bool init_done;
bool sdw_attached; bool sdw_attached;
bool fw_patched;
bool can_hibernate;
struct completion init_completion; struct completion init_completion;
struct gpio_desc *reset_gpio;
u32 rx_mask; u32 rx_mask;
u32 tx_mask; u32 tx_mask;
...@@ -73,7 +64,7 @@ int cs35l56_system_resume_no_irq(struct device *dev); ...@@ -73,7 +64,7 @@ int cs35l56_system_resume_no_irq(struct device *dev);
int cs35l56_system_resume_early(struct device *dev); int cs35l56_system_resume_early(struct device *dev);
int cs35l56_system_resume(struct device *dev); int cs35l56_system_resume(struct device *dev);
irqreturn_t cs35l56_irq(int irq, void *data); irqreturn_t cs35l56_irq(int irq, void *data);
int cs35l56_irq_request(struct cs35l56_private *cs35l56, int irq); int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq);
int cs35l56_common_probe(struct cs35l56_private *cs35l56); int cs35l56_common_probe(struct cs35l56_private *cs35l56);
int cs35l56_init(struct cs35l56_private *cs35l56); int cs35l56_init(struct cs35l56_private *cs35l56);
void cs35l56_remove(struct cs35l56_private *cs35l56); void cs35l56_remove(struct cs35l56_private *cs35l56);
......
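The net effect in the header is that dev, regmap, irq, irq_lock, rev, reset_gpio and the state flags move out of struct cs35l56_private into the embedded struct cs35l56_base, which is what lets utility code shared with the HDA driver take a struct cs35l56_base * (as cs35l56_set_patch() and cs35l56_irq_request() now do). A hypothetical helper sketch, not part of this commit, using only members of the shared struct:

#include <linux/device.h>
#include <linux/regmap.h>

/* Hypothetical example: a utility of this shape can be shared between the
 * ASoC and HDA drivers because it only touches struct cs35l56_base members.
 */
static int cs35l56_example_read_revid(struct cs35l56_base *cs35l56_base)
{
	unsigned int revid;
	int ret;

	ret = regmap_read(cs35l56_base->regmap, CS35L56_REVID, &revid);
	if (ret < 0) {
		dev_err(cs35l56_base->dev, "Get Revision ID failed\n");
		return ret;
	}

	cs35l56_base->rev = revid & (CS35L56_AREVID_MASK | CS35L56_MTLREVID_MASK);

	return 0;
}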