Commit 4c9818d8 authored by Linus Torvalds

Merge tag 'soundwire-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire

Pull soundwire updates from Vinod Koul:
 "This features the AMD SoundWire controller driver, a bunch of Intel
  changes for future platform support, SoundWire API updates, etc.:

   - Support for AMD soundwire controller

   - Intel driver updates to support future platforms

   - Core API sdw_nread/nwrite_no_pm updates to handle page boundaries"

* tag 'soundwire-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire: (38 commits)
  soundwire: intel_auxdevice: improve pm_prepare step
  soundwire: bus: Fix unbalanced pm_runtime_put() causing usage count underflow
  soundwire: intel: don't save hw_params for use in prepare
  soundwire: bus: Update sdw_nread/nwrite_no_pm to handle page boundaries
  soundwire: bus: Update kernel doc for no_pm functions
  soundwire: bus: Remove now outdated comments on no_pm IO
  soundwire: stream: uniquify dev_err() logs
  soundwire: stream: remove bus->dev from logs on multiple buses
  soundwire: amd: add pm_prepare callback and pm ops support
  soundwire: amd: handle SoundWire wake enable interrupt
  soundwire: amd: add runtime pm ops for AMD SoundWire manager driver
  soundwire: amd: add SoundWire manager interrupt handling
  soundwire: amd: enable build for AMD SoundWire manager driver
  soundwire: amd: register SoundWire manager dai ops
  soundwire: amd: Add support for AMD Manager driver
  soundwire: export sdw_compute_slave_ports() function
  soundwire: stream: restore cumulative bus bandwidth when compute_params callback failed
  soundwire: bandwidth allocation: Use hweight32() to calculate set bits
  soundwire: qcom: gracefully handle too many ports in DT
  soundwire: qcom: define hardcoded version magic numbers
  ...
parents 54bdf8a3 67572c8d
@@ -18,6 +18,16 @@ if SOUNDWIRE

comment "SoundWire Devices"

+ config SOUNDWIRE_AMD
+     tristate "AMD SoundWire Manager driver"
+     select SOUNDWIRE_GENERIC_ALLOCATION
+     depends on ACPI && SND_SOC
+     help
+       SoundWire AMD Manager driver.
+       If you have an AMD platform which has a SoundWire Manager then
+       enable this config option to get the SoundWire support for that
+       device.
+
config SOUNDWIRE_CADENCE
    tristate
...
@@ -15,12 +15,17 @@ ifdef CONFIG_DEBUG_FS
soundwire-bus-y += debugfs.o
endif

+ #AMD driver
+ soundwire-amd-y := amd_manager.o
+ obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o
+
#Cadence Objs
soundwire-cadence-y := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o

#Intel driver
- soundwire-intel-y := intel.o intel_auxdevice.o intel_init.o dmi-quirks.o
+ soundwire-intel-y := intel.o intel_auxdevice.o intel_init.o dmi-quirks.o \
+                      intel_bus_common.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o

#Qualcomm driver
...
[Two collapsed file diffs are not shown.]
@@ -384,45 +384,73 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,

/*
 * Read/Write IO functions.
- * no_pm versions can only be called by the bus, e.g. while enumerating or
- * handling suspend-resume sequences.
- * all clients need to use the pm versions
 */
- int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+ static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
+                                size_t count, u8 *val)
{
    struct sdw_msg msg;
+   size_t size;
    int ret;

-   ret = sdw_fill_msg(&msg, slave, addr, count,
-                      slave->dev_num, SDW_MSG_FLAG_READ, val);
-   if (ret < 0)
-       return ret;
+   while (count) {
+       // Only handle bytes up to next page boundary
+       size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));

-   ret = sdw_transfer(slave->bus, &msg);
-   if (slave->is_mockup_device)
-       ret = 0;
-   return ret;
+       ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
+       if (ret < 0)
+           return ret;
+
+       ret = sdw_transfer(slave->bus, &msg);
+       if (ret < 0 && !slave->is_mockup_device)
+           return ret;
+
+       addr += size;
+       val += size;
+       count -= size;
+   }
+
+   return 0;
+}
+
+/**
+ * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @count: length
+ * @val: Buffer for values to be read
+ *
+ * Note that if the message crosses a page boundary each page will be
+ * transferred under a separate invocation of the msg_lock.
+ */
+int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+   return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);

+/**
+ * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @count: length
+ * @val: Buffer for values to be written
+ *
+ * Note that if the message crosses a page boundary each page will be
+ * transferred under a separate invocation of the msg_lock.
+ */
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
-   struct sdw_msg msg;
-   int ret;
-
-   ret = sdw_fill_msg(&msg, slave, addr, count,
-                      slave->dev_num, SDW_MSG_FLAG_WRITE, (u8 *)val);
-   if (ret < 0)
-       return ret;
-
-   ret = sdw_transfer(slave->bus, &msg);
-   if (slave->is_mockup_device)
-       ret = 0;
-   return ret;
+   return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);

+/**
+ * sdw_write_no_pm() - Write a SDW Slave register with no PM
+ * @slave: SDW Slave
+ * @addr: Register address
+ * @value: Register value
+ */
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
    return sdw_nwrite_no_pm(slave, addr, 1, &value);
@@ -495,6 +523,11 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);

+/**
+ * sdw_read_no_pm() - Read a SDW Slave register with no PM
+ * @slave: SDW Slave
+ * @addr: Register address
+ */
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
    u8 buf;
@@ -541,14 +574,21 @@ EXPORT_SYMBOL(sdw_update);
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
+ *
+ * This version of the function will take a PM reference to the slave
+ * device.
+ * Note that if the message crosses a page boundary each page will be
+ * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
    int ret;

-   ret = pm_runtime_resume_and_get(&slave->dev);
-   if (ret < 0 && ret != -EACCES)
+   ret = pm_runtime_get_sync(&slave->dev);
+   if (ret < 0 && ret != -EACCES) {
+       pm_runtime_put_noidle(&slave->dev);
        return ret;
+   }

    ret = sdw_nread_no_pm(slave, addr, count, val);
@@ -565,14 +605,21 @@ EXPORT_SYMBOL(sdw_nread);
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
+ *
+ * This version of the function will take a PM reference to the slave
+ * device.
+ * Note that if the message crosses a page boundary each page will be
+ * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
    int ret;

-   ret = pm_runtime_resume_and_get(&slave->dev);
-   if (ret < 0 && ret != -EACCES)
+   ret = pm_runtime_get_sync(&slave->dev);
+   if (ret < 0 && ret != -EACCES) {
+       pm_runtime_put_noidle(&slave->dev);
        return ret;
+   }

    ret = sdw_nwrite_no_pm(slave, addr, count, val);
@@ -587,6 +634,9 @@ EXPORT_SYMBOL(sdw_nwrite);
 * sdw_read() - Read a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
+ *
+ * This version of the function will take a PM reference to the slave
+ * device.
 */
int sdw_read(struct sdw_slave *slave, u32 addr)
{
@@ -606,6 +656,9 @@ EXPORT_SYMBOL(sdw_read);
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
+ *
+ * This version of the function will take a PM reference to the slave
+ * device.
 */
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
@@ -1541,9 +1594,10 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)

    sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);

-   ret = pm_runtime_resume_and_get(&slave->dev);
+   ret = pm_runtime_get_sync(&slave->dev);
    if (ret < 0 && ret != -EACCES) {
        dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
+       pm_runtime_put_noidle(&slave->dev);
        return ret;
    }
...
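For context, a minimal, hypothetical caller sketch of the updated API above (not part of this merge): it reads a block that straddles a register paging boundary. With the sdw_ntransfer_no_pm() loop shown in the hunk, the core now splits such a transfer at (SDW_REGADDR + 1) internally, so callers no longer need to chunk it themselves. The function name and the chosen offset are illustrative only.

#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>

/*
 * Hypothetical helper, for illustration only: read 32 bytes that start
 * 16 bytes below the first paging boundary, so the transfer crosses it.
 * The bus core splits this into one message per page.
 */
static int example_read_across_page(struct sdw_slave *slave, u8 *buf)
{
	u32 addr = (SDW_REGADDR + 1) - 16;	/* last 16 bytes of page 0 */

	return sdw_nread(slave, addr, 32, buf);	/* takes a PM reference */
}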
@@ -144,6 +144,13 @@ struct sdw_master_runtime {
    struct list_head bus_node;
};

+ struct sdw_transport_data {
+     int hstart;
+     int hstop;
+     int block_offset;
+     int sub_block_offset;
+ };
+
struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
                                            enum sdw_data_direction direction,
                                            unsigned int port_num);
@@ -158,17 +165,6 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg);
int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
                 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);

- /* Retrieve and return channel count from channel mask */
- static inline int sdw_ch_mask_to_ch(int ch_mask)
- {
-     int c = 0;
-
-     for (c = 0; ch_mask; ch_mask >>= 1)
-         c += ch_mask & 1;
-
-     return c;
- }
-
/* Fill transport parameter data structure */
static inline void sdw_fill_xport_params(struct sdw_transport_params *params,
                                         int port_num, bool grp_ctrl_valid,
@@ -212,5 +208,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
void sdw_clear_slave_status(struct sdw_bus *bus, u32 request);
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);

+ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
+                              struct sdw_transport_data *t_data);
+
#endif /* __SDW_BUS_H */
@@ -27,32 +27,36 @@ module_param_named(cnds_mcp_int_mask, interrupt_mask, int, 0444);
MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");

#define CDNS_MCP_CONFIG 0x0
- #define CDNS_MCP_CONFIG_MCMD_RETRY GENMASK(27, 24)
- #define CDNS_MCP_CONFIG_MPREQ_DELAY GENMASK(20, 16)
- #define CDNS_MCP_CONFIG_MMASTER BIT(7)
#define CDNS_MCP_CONFIG_BUS_REL BIT(6)
- #define CDNS_MCP_CONFIG_SNIFFER BIT(5)
- #define CDNS_MCP_CONFIG_SSPMOD BIT(4)
- #define CDNS_MCP_CONFIG_CMD BIT(3)
- #define CDNS_MCP_CONFIG_OP GENMASK(2, 0)
- #define CDNS_MCP_CONFIG_OP_NORMAL 0
+
+ #define CDNS_IP_MCP_CONFIG 0x0 /* IP offset added at run-time */
+ #define CDNS_IP_MCP_CONFIG_MCMD_RETRY GENMASK(27, 24)
+ #define CDNS_IP_MCP_CONFIG_MPREQ_DELAY GENMASK(20, 16)
+ #define CDNS_IP_MCP_CONFIG_MMASTER BIT(7)
+ #define CDNS_IP_MCP_CONFIG_SNIFFER BIT(5)
+ #define CDNS_IP_MCP_CONFIG_CMD BIT(3)
+ #define CDNS_IP_MCP_CONFIG_OP GENMASK(2, 0)
+ #define CDNS_IP_MCP_CONFIG_OP_NORMAL 0

#define CDNS_MCP_CONTROL 0x4
- #define CDNS_MCP_CONTROL_RST_DELAY GENMASK(10, 8)
#define CDNS_MCP_CONTROL_CMD_RST BIT(7)
#define CDNS_MCP_CONTROL_SOFT_RST BIT(6)
- #define CDNS_MCP_CONTROL_SW_RST BIT(5)
#define CDNS_MCP_CONTROL_HW_RST BIT(4)
- #define CDNS_MCP_CONTROL_CLK_PAUSE BIT(3)
#define CDNS_MCP_CONTROL_CLK_STOP_CLR BIT(2)
- #define CDNS_MCP_CONTROL_CMD_ACCEPT BIT(1)
- #define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0)

- #define CDNS_MCP_CMDCTRL 0x8
+ #define CDNS_IP_MCP_CONTROL 0x4 /* IP offset added at run-time */
+ #define CDNS_IP_MCP_CONTROL_RST_DELAY GENMASK(10, 8)
+ #define CDNS_IP_MCP_CONTROL_SW_RST BIT(5)
+ #define CDNS_IP_MCP_CONTROL_CLK_PAUSE BIT(3)
+ #define CDNS_IP_MCP_CONTROL_CMD_ACCEPT BIT(1)
+ #define CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP BIT(0)

- #define CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR BIT(2)
+ #define CDNS_IP_MCP_CMDCTRL 0x8 /* IP offset added at run-time */
+ #define CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR BIT(2)

#define CDNS_MCP_SSPSTAT 0xC
#define CDNS_MCP_FRAME_SHAPE 0x10
@@ -125,8 +129,8 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_FIFOSTAT 0x7C
#define CDNS_MCP_RX_FIFO_AVAIL GENMASK(5, 0)

- #define CDNS_MCP_CMD_BASE 0x80
- #define CDNS_MCP_RESP_BASE 0x80
+ #define CDNS_IP_MCP_CMD_BASE 0x80 /* IP offset added at run-time */
+ #define CDNS_IP_MCP_RESP_BASE 0x80 /* IP offset added at run-time */

/* FIFO can hold 8 commands */
#define CDNS_MCP_CMD_LEN 8
#define CDNS_MCP_CMD_WORD_LEN 0x4
@@ -206,6 +210,16 @@ static inline void cdns_writel(struct sdw_cdns *cdns, int offset, u32 value)
    writel(value, cdns->registers + offset);
}

+ static inline u32 cdns_ip_readl(struct sdw_cdns *cdns, int offset)
+ {
+     return cdns_readl(cdns, cdns->ip_offset + offset);
+ }
+
+ static inline void cdns_ip_writel(struct sdw_cdns *cdns, int offset, u32 value)
+ {
+     return cdns_writel(cdns, cdns->ip_offset + offset, value);
+ }
+
static inline void cdns_updatel(struct sdw_cdns *cdns,
                                int offset, u32 mask, u32 val)
{
@@ -216,6 +230,12 @@ static inline void cdns_updatel(struct sdw_cdns *cdns,
    cdns_writel(cdns, offset, tmp);
}

+ static inline void cdns_ip_updatel(struct sdw_cdns *cdns,
+                                    int offset, u32 mask, u32 val)
+ {
+     cdns_updatel(cdns, cdns->ip_offset + offset, mask, val);
+ }
+
static int cdns_set_wait(struct sdw_cdns *cdns, int offset, u32 mask, u32 value)
{
    int timeout = 10;
@@ -408,9 +428,9 @@ static int cdns_parity_error_injection(void *data, u64 value)
    mutex_lock(&bus->bus_lock);

    /* program hardware to inject parity error */
-   cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
-                CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
-                CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR);
+   cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
+                   CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
+                   CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR);

    /* commit changes */
    cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
@@ -422,9 +442,9 @@ static int cdns_parity_error_injection(void *data, u64 value)
    dev_info(cdns->dev, "parity error injection, read: %d\n", ret);

    /* program hardware to disable parity error */
-   cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
-                CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
-                0);
+   cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
+                   CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
+                   0);

    /* commit changes */
    cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
@@ -570,10 +590,10 @@ static void cdns_read_response(struct sdw_cdns *cdns)
        num_resp = ARRAY_SIZE(cdns->response_buf);
    }

-   cmd_base = CDNS_MCP_CMD_BASE;
+   cmd_base = CDNS_IP_MCP_CMD_BASE;

    for (i = 0; i < num_resp; i++) {
-       cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+       cdns->response_buf[i] = cdns_ip_readl(cdns, cmd_base);
        cmd_base += CDNS_MCP_CMD_WORD_LEN;
    }
}
@@ -592,7 +612,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
        cdns->msg_count = count;
    }

-   base = CDNS_MCP_CMD_BASE;
+   base = CDNS_IP_MCP_CMD_BASE;
    addr = msg->addr + offset;

    for (i = 0; i < count; i++) {
@@ -605,7 +625,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
            data |= msg->buf[i + offset];

        data |= FIELD_PREP(CDNS_MCP_CMD_SSP_TAG, msg->ssp_sync);
-       cdns_writel(cdns, base, data);
+       cdns_ip_writel(cdns, base, data);
        base += CDNS_MCP_CMD_WORD_LEN;
    }
@@ -653,10 +673,10 @@ cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
    data[0] |= msg->addr_page1;
    data[1] |= msg->addr_page2;

-   base = CDNS_MCP_CMD_BASE;
-   cdns_writel(cdns, base, data[0]);
+   base = CDNS_IP_MCP_CMD_BASE;
+   cdns_ip_writel(cdns, base, data[0]);
    base += CDNS_MCP_CMD_WORD_LEN;
-   cdns_writel(cdns, base, data[1]);
+   cdns_ip_writel(cdns, base, data[1]);

    time = wait_for_completion_timeout(&cdns->tx_complete,
                                       msecs_to_jiffies(CDNS_TX_TIMEOUT));
@@ -1033,6 +1053,7 @@ static void cdns_update_slave_status_work(struct work_struct *work)
void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string,
                                       bool initial_delay, int reset_iterations)
{
+   u32 ip_mcp_control;
    u32 mcp_control;
    u32 mcp_config_update;
    int i;
@@ -1040,6 +1061,12 @@ void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string
    if (initial_delay)
        usleep_range(1000, 1500);

+   ip_mcp_control = cdns_ip_readl(cdns, CDNS_IP_MCP_CONTROL);
+
+   /* the following bits should be cleared immediately */
+   if (ip_mcp_control & CDNS_IP_MCP_CONTROL_SW_RST)
+       dev_err(cdns->dev, "%s failed: IP_MCP_CONTROL_SW_RST is not cleared\n", string);
+
    mcp_control = cdns_readl(cdns, CDNS_MCP_CONTROL);

    /* the following bits should be cleared immediately */
@@ -1047,10 +1074,9 @@ void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string
        dev_err(cdns->dev, "%s failed: MCP_CONTROL_CMD_RST is not cleared\n", string);
    if (mcp_control & CDNS_MCP_CONTROL_SOFT_RST)
        dev_err(cdns->dev, "%s failed: MCP_CONTROL_SOFT_RST is not cleared\n", string);
-   if (mcp_control & CDNS_MCP_CONTROL_SW_RST)
-       dev_err(cdns->dev, "%s failed: MCP_CONTROL_SW_RST is not cleared\n", string);
    if (mcp_control & CDNS_MCP_CONTROL_CLK_STOP_CLR)
        dev_err(cdns->dev, "%s failed: MCP_CONTROL_CLK_STOP_CLR is not cleared\n", string);

    mcp_config_update = cdns_readl(cdns, CDNS_MCP_CONFIG_UPDATE);
    if (mcp_config_update & CDNS_MCP_CONFIG_UPDATE_BIT)
        dev_err(cdns->dev, "%s failed: MCP_CONFIG_UPDATE_BIT is not cleared\n", string);
@@ -1327,34 +1353,39 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
                  CDNS_MCP_CONTROL_CMD_RST);

    /* Set cmd accept mode */
-   cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
-                CDNS_MCP_CONTROL_CMD_ACCEPT);
+   cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT,
+                   CDNS_IP_MCP_CONTROL_CMD_ACCEPT);

    /* Configure mcp config */
    val = cdns_readl(cdns, CDNS_MCP_CONFIG);

+   /* Disable auto bus release */
+   val &= ~CDNS_MCP_CONFIG_BUS_REL;
+
+   cdns_writel(cdns, CDNS_MCP_CONFIG, val);
+
+   /* Configure IP mcp config */
+   val = cdns_ip_readl(cdns, CDNS_IP_MCP_CONFIG);
+
    /* enable bus operations with clock and data */
-   val &= ~CDNS_MCP_CONFIG_OP;
-   val |= CDNS_MCP_CONFIG_OP_NORMAL;
+   val &= ~CDNS_IP_MCP_CONFIG_OP;
+   val |= CDNS_IP_MCP_CONFIG_OP_NORMAL;

    /* Set cmd mode for Tx and Rx cmds */
-   val &= ~CDNS_MCP_CONFIG_CMD;
+   val &= ~CDNS_IP_MCP_CONFIG_CMD;

    /* Disable sniffer mode */
-   val &= ~CDNS_MCP_CONFIG_SNIFFER;
-
-   /* Disable auto bus release */
-   val &= ~CDNS_MCP_CONFIG_BUS_REL;
+   val &= ~CDNS_IP_MCP_CONFIG_SNIFFER;

    if (cdns->bus.multi_link)
        /* Set Multi-master mode to take gsync into account */
-       val |= CDNS_MCP_CONFIG_MMASTER;
+       val |= CDNS_IP_MCP_CONFIG_MMASTER;

    /* leave frame delay to hardware default of 0x1F */

    /* leave command retry to hardware default of 0 */

-   cdns_writel(cdns, CDNS_MCP_CONFIG, val);
+   cdns_ip_writel(cdns, CDNS_IP_MCP_CONFIG, val);

    /* changes will be committed later */
    return 0;
@@ -1584,9 +1615,9 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
     * in clock stop state
     */
    if (block_wake)
-       cdns_updatel(cdns, CDNS_MCP_CONTROL,
-                    CDNS_MCP_CONTROL_BLOCK_WAKEUP,
-                    CDNS_MCP_CONTROL_BLOCK_WAKEUP);
+       cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
+                       CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP,
+                       CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP);

    list_for_each_entry(slave, &cdns->bus.slaves, node) {
        if (slave->status == SDW_SLAVE_ATTACHED ||
@@ -1659,18 +1690,18 @@ int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
        return ret;
    }

-   cdns_updatel(cdns, CDNS_MCP_CONTROL,
-                CDNS_MCP_CONTROL_BLOCK_WAKEUP, 0);
+   cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
+                   CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP, 0);

-   cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
-                CDNS_MCP_CONTROL_CMD_ACCEPT);
+   cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT,
+                   CDNS_IP_MCP_CONTROL_CMD_ACCEPT);

    if (!bus_reset) {
        /* enable bus operations with clock and data */
-       cdns_updatel(cdns, CDNS_MCP_CONFIG,
-                    CDNS_MCP_CONFIG_OP,
-                    CDNS_MCP_CONFIG_OP_NORMAL);
+       cdns_ip_updatel(cdns, CDNS_IP_MCP_CONFIG,
+                       CDNS_IP_MCP_CONFIG_OP,
+                       CDNS_IP_MCP_CONFIG_OP_NORMAL);

        ret = cdns_config_update(cdns);
        if (ret < 0) {
...
...@@ -84,7 +84,6 @@ struct sdw_cdns_stream_config { ...@@ -84,7 +84,6 @@ struct sdw_cdns_stream_config {
* @bus: Bus handle * @bus: Bus handle
* @stream_type: Stream type * @stream_type: Stream type
* @link_id: Master link id * @link_id: Master link id
* @hw_params: hw_params to be applied in .prepare step
* @suspended: status set when suspended, to be used in .prepare * @suspended: status set when suspended, to be used in .prepare
* @paused: status set in .trigger, to be used in suspend * @paused: status set in .trigger, to be used in suspend
* @direction: stream direction * @direction: stream direction
...@@ -96,7 +95,6 @@ struct sdw_cdns_dai_runtime { ...@@ -96,7 +95,6 @@ struct sdw_cdns_dai_runtime {
struct sdw_bus *bus; struct sdw_bus *bus;
enum sdw_stream_type stream_type; enum sdw_stream_type stream_type;
int link_id; int link_id;
struct snd_pcm_hw_params *hw_params;
bool suspended; bool suspended;
bool paused; bool paused;
int direction; int direction;
...@@ -107,6 +105,7 @@ struct sdw_cdns_dai_runtime { ...@@ -107,6 +105,7 @@ struct sdw_cdns_dai_runtime {
* @dev: Linux device * @dev: Linux device
* @bus: Bus handle * @bus: Bus handle
* @instance: instance number * @instance: instance number
* @ip_offset: version-dependent offset to access IP_MCP registers and fields
* @response_buf: SoundWire response buffer * @response_buf: SoundWire response buffer
* @tx_complete: Tx completion * @tx_complete: Tx completion
* @ports: Data ports * @ports: Data ports
...@@ -122,6 +121,8 @@ struct sdw_cdns { ...@@ -122,6 +121,8 @@ struct sdw_cdns {
struct sdw_bus bus; struct sdw_bus bus;
unsigned int instance; unsigned int instance;
u32 ip_offset;
/* /*
* The datasheet says the RX FIFO AVAIL can be 2 entries more * The datasheet says the RX FIFO AVAIL can be 2 entries more
* than the FIFO capacity, so allow for this. * than the FIFO capacity, so allow for this.
......
@@ -73,6 +73,23 @@ static const struct adr_remap hp_omen_16[] = {
    {}
};

+ /*
+  * Intel NUC M15 LAPRC510 and LAPRC710
+  */
+ static const struct adr_remap intel_rooks_county[] = {
+     /* rt711-sdca on link0 */
+     {
+         0x000020025d071100ull,
+         0x000030025d071101ull
+     },
+     /* rt1316-sdca on link2 */
+     {
+         0x000120025d071100ull,
+         0x000230025d131601ull
+     },
+     {}
+ };
+
static const struct dmi_system_id adr_remap_quirk_table[] = {
    /* TGL devices */
    {
@@ -98,6 +115,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
        },
        .driver_data = (void *)intel_tgl_bios,
    },
+   {
+       /* quirk used for NUC15 'Rooks County' LAPRC510 and LAPRC710 skews */
+       .matches = {
+           DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+           DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
+       },
+       .driver_data = (void *)intel_rooks_county,
+   },
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
...
@@ -6,6 +6,7 @@
 *
 */

+ #include <linux/bitops.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -28,15 +29,8 @@ struct sdw_group {
    unsigned int *rates;
};

- struct sdw_transport_data {
-     int hstart;
-     int hstop;
-     int block_offset;
-     int sub_block_offset;
- };
-
- static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
-                                     struct sdw_transport_data *t_data)
+ void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
+                              struct sdw_transport_data *t_data)
{
    struct sdw_slave_runtime *s_rt = NULL;
    struct sdw_port_runtime *p_rt;
@@ -54,7 +48,7 @@ static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
        slave_total_ch = 0;

        list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
-           ch = sdw_ch_mask_to_ch(p_rt->ch_mask);
+           ch = hweight32(p_rt->ch_mask);

            sdw_fill_xport_params(&p_rt->transport_params,
                                  p_rt->num, false,
@@ -85,6 +79,7 @@ static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
        }
    }
}
+ EXPORT_SYMBOL(sdw_compute_slave_ports);

static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
                                     struct sdw_group_params *params,
...
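As a quick sanity check on the hweight32() substitution above: the removed sdw_ch_mask_to_ch() helper and a population count return the same channel count for any mask. A standalone userspace sketch, using __builtin_popcount() as a stand-in for the kernel's hweight32() (not part of the patch):

#include <assert.h>

/* same loop as the removed sdw_ch_mask_to_ch() helper */
static int ch_mask_to_ch(unsigned int ch_mask)
{
	int c = 0;

	for (; ch_mask; ch_mask >>= 1)
		c += ch_mask & 1;
	return c;
}

int main(void)
{
	/* 4 contiguous channels and a sparse 2-channel mask */
	assert(ch_mask_to_ch(0xF) == __builtin_popcount(0xF));
	assert(ch_mask_to_ch(0xA) == __builtin_popcount(0xA));
	return 0;
}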
[Collapsed file diff not shown.]
@@ -50,6 +50,35 @@ struct sdw_intel {
#endif
};

+ enum intel_pdi_type {
+     INTEL_PDI_IN = 0,
+     INTEL_PDI_OUT = 1,
+     INTEL_PDI_BD = 2,
+ };
+
+ /*
+  * Read, write helpers for HW registers
+  */
+ static inline int intel_readl(void __iomem *base, int offset)
+ {
+     return readl(base + offset);
+ }
+
+ static inline void intel_writel(void __iomem *base, int offset, int value)
+ {
+     writel(value, base + offset);
+ }
+
+ static inline u16 intel_readw(void __iomem *base, int offset)
+ {
+     return readw(base + offset);
+ }
+
+ static inline void intel_writew(void __iomem *base, int offset, u16 value)
+ {
+     writew(value, base + offset);
+ }
+
#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)

#define INTEL_MASTER_RESET_ITERATIONS 10
@@ -138,4 +167,42 @@ static inline void sdw_intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
        SDW_INTEL_OPS(sdw, shim_wake)(sdw, wake_enable);
}

+ static inline void sdw_intel_sync_arm(struct sdw_intel *sdw)
+ {
+     if (SDW_INTEL_CHECK_OPS(sdw, sync_arm))
+         SDW_INTEL_OPS(sdw, sync_arm)(sdw);
+ }
+
+ static inline int sdw_intel_sync_go_unlocked(struct sdw_intel *sdw)
+ {
+     if (SDW_INTEL_CHECK_OPS(sdw, sync_go_unlocked))
+         return SDW_INTEL_OPS(sdw, sync_go_unlocked)(sdw);
+
+     return -ENOTSUPP;
+ }
+
+ static inline int sdw_intel_sync_go(struct sdw_intel *sdw)
+ {
+     if (SDW_INTEL_CHECK_OPS(sdw, sync_go))
+         return SDW_INTEL_OPS(sdw, sync_go)(sdw);
+
+     return -ENOTSUPP;
+ }
+
+ static inline bool sdw_intel_sync_check_cmdsync_unlocked(struct sdw_intel *sdw)
+ {
+     if (SDW_INTEL_CHECK_OPS(sdw, sync_check_cmdsync_unlocked))
+         return SDW_INTEL_OPS(sdw, sync_check_cmdsync_unlocked)(sdw);
+
+     return false;
+ }
+
+ /* common bus management */
+ int intel_start_bus(struct sdw_intel *sdw);
+ int intel_start_bus_after_reset(struct sdw_intel *sdw);
+ void intel_check_clock_stop(struct sdw_intel *sdw);
+ int intel_start_bus_after_clock_stop(struct sdw_intel *sdw);
+ int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop);
+
+ /* common bank switch routines */
+ int intel_pre_bank_switch(struct sdw_intel *sdw);
+ int intel_post_bank_switch(struct sdw_intel *sdw);
+
#endif /* __SDW_INTEL_LOCAL_H */
@@ -358,10 +358,12 @@ static int intel_resume_child_device(struct device *dev, void *data)
    }

    ret = pm_request_resume(dev);
-   if (ret < 0)
+   if (ret < 0) {
        dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
+       return ret;
+   }

-   return ret;
+   return 0;
}

static int __maybe_unused intel_pm_prepare(struct device *dev)
...
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
// Copyright(c) 2015-2023 Intel Corporation. All rights reserved.
#include <linux/acpi.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"
int intel_start_bus(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
int ret;
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
/*
* follow recommended programming flows to avoid timeouts when
* gsync is enabled
*/
if (bus->multi_link)
sdw_intel_sync_arm(sdw);
ret = sdw_cdns_init(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
goto err_interrupt;
}
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
goto err_interrupt;
}
if (bus->multi_link) {
ret = sdw_intel_sync_go(sdw);
if (ret < 0) {
dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
goto err_interrupt;
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__,
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
err_interrupt:
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
int intel_start_bus_after_reset(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
bool clock_stop0;
int status;
int ret;
/*
* An exception condition occurs for the CLK_STOP_BUS_RESET
* case if one or more masters remain active. In this condition,
* all the masters are powered on for they are in the same power
* domain. Master can preserve its context for clock stop0, so
* there is no need to clear slave status and reset bus.
*/
clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
if (!clock_stop0) {
/*
* make sure all Slaves are tagged as UNATTACHED and
* provide reason for reinitialization
*/
status = SDW_UNATTACH_REQUEST_MASTER_RESET;
sdw_clear_slave_status(bus, status);
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
/*
* follow recommended programming flows to avoid
* timeouts when gsync is enabled
*/
if (bus->multi_link)
sdw_intel_sync_arm(sdw);
/*
* Re-initialize the IP since it was powered-off
*/
sdw_cdns_init(&sdw->cdns);
} else {
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
}
ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
if (ret < 0) {
dev_err(dev, "unable to restart clock during resume\n");
goto err_interrupt;
}
if (!clock_stop0) {
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "unable to exit bus reset sequence during resume\n");
goto err_interrupt;
}
if (bus->multi_link) {
ret = sdw_intel_sync_go(sdw);
if (ret < 0) {
dev_err(sdw->cdns.dev, "sync go failed during resume\n");
goto err_interrupt;
}
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
err_interrupt:
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
void intel_check_clock_stop(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
bool clock_stop0;
clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
if (!clock_stop0)
dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
}
int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
int ret;
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
ret = sdw_cdns_clock_restart(cdns, false);
if (ret < 0) {
dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
}
int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
bool wake_enable = false;
int ret;
if (clock_stop) {
ret = sdw_cdns_clock_stop(cdns, true);
if (ret < 0)
dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
else
wake_enable = true;
}
ret = sdw_cdns_enable_interrupt(cdns, false);
if (ret < 0) {
dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
return ret;
}
ret = sdw_intel_link_power_down(sdw);
if (ret) {
dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
return ret;
}
sdw_intel_shim_wake(sdw, wake_enable);
return 0;
}
/*
* bank switch routines
*/
int intel_pre_bank_switch(struct sdw_intel *sdw)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
sdw_intel_sync_arm(sdw);
return 0;
}
int intel_post_bank_switch(struct sdw_intel *sdw)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
int ret = 0;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
mutex_lock(sdw->link_res->shim_lock);
/*
* post_bank_switch() ops is called from the bus in loop for
* all the Masters in the steam with the expectation that
* we trigger the bankswitch for the only first Master in the list
* and do nothing for the other Masters
*
* So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
*/
if (sdw_intel_sync_check_cmdsync_unlocked(sdw))
ret = sdw_intel_sync_go_unlocked(sdw);
mutex_unlock(sdw->link_res->shim_lock);
if (ret < 0)
dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
return ret;
}
@@ -28,6 +28,9 @@
#define SWRM_LINK_MANAGER_EE 0x018
#define SWRM_EE_CPU 1
#define SWRM_FRM_GEN_ENABLED BIT(0)
+ #define SWRM_VERSION_1_3_0 0x01030000
+ #define SWRM_VERSION_1_5_1 0x01050001
+ #define SWRM_VERSION_1_7_0 0x01070000
#define SWRM_COMP_HW_VERSION 0x00
#define SWRM_COMP_CFG_ADDR 0x04
#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
@@ -351,8 +354,7 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
    /* Its assumed that write is okay as we do not get any status back */
    swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);

-   /* version 1.3 or less */
-   if (swrm->version <= 0x01030000)
+   if (swrm->version <= SWRM_VERSION_1_3_0)
        usleep_range(150, 155);

    if (cmd_id == SWR_BROADCAST_CMD_ID) {
@@ -695,7 +697,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
    u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
    ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);

-   if (ctrl->version >= 0x01070000) {
+   if (ctrl->version >= SWRM_VERSION_1_7_0) {
        ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
        ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
                        SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
@@ -704,8 +706,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
    }

    /* Configure number of retries of a read/write cmd */
-   if (ctrl->version > 0x01050001) {
-       /* Only for versions >= 1.5.1 */
+   if (ctrl->version >= SWRM_VERSION_1_5_1) {
        ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
                        SWRM_RD_WR_CMD_RETRIES |
                        SWRM_CONTINUE_EXEC_ON_CMD_IGNORE);
@@ -1217,6 +1218,9 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
    ctrl->num_dout_ports = val;

    nports = ctrl->num_dout_ports + ctrl->num_din_ports;
+   if (nports > QCOM_SDW_MAX_PORTS)
+       return -EINVAL;
+
    /* Valid port numbers are from 1-14, so mask out port 0 explicitly */
    set_bit(0, &ctrl->dout_port_mask);
    set_bit(0, &ctrl->din_port_mask);
@@ -1239,7 +1243,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
    ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
                                    bp_mode, nports);
    if (ret) {
-       if (ctrl->version <= 0x01030000)
+       if (ctrl->version <= SWRM_VERSION_1_3_0)
            memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
        else
            return ret;
@@ -1442,7 +1446,7 @@ static int qcom_swrm_probe(struct platform_device *pdev)
    pm_runtime_enable(dev);

    /* Clk stop is not supported on WSA Soundwire masters */
-   if (ctrl->version <= 0x01030000) {
+   if (ctrl->version <= SWRM_VERSION_1_3_0) {
        ctrl->clock_stop_not_supported = true;
    } else {
        ctrl->reg_read(ctrl, SWRM_COMP_MASTER_ID, &val);
@@ -1527,7 +1531,7 @@ static int __maybe_unused swrm_runtime_resume(struct device *dev)
    } else {
        reset_control_reset(ctrl->audio_cgcr);

-       if (ctrl->version >= 0x01070000) {
+       if (ctrl->version >= SWRM_VERSION_1_7_0) {
            ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
            ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
                            SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
...
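For reference, the SWRM_VERSION_x_y_z constants introduced above pack the controller version as major.minor.step into one 32-bit value, which is why plain integer comparisons against them work. A hypothetical helper (not part of the driver) that makes the assumed encoding explicit:

/* hypothetical macro, for illustration only: 0xAABBCCCC = major.minor.step */
#define SWRM_VERSION(major, minor, step) \
	(((major) << 24) | ((minor) << 16) | (step))

/*
 * SWRM_VERSION(1, 3, 0) == 0x01030000 == SWRM_VERSION_1_3_0
 * SWRM_VERSION(1, 5, 1) == 0x01050001 == SWRM_VERSION_1_5_1
 * SWRM_VERSION(1, 7, 0) == 0x01070000 == SWRM_VERSION_1_7_0
 */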
@@ -1369,7 +1369,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
        if (ret < 0) {
            dev_err(bus->dev, "Compute params failed: %d\n",
                    ret);
-           return ret;
+           goto restore_params;
        }
    }
@@ -1389,7 +1389,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
    ret = do_bank_switch(stream);
    if (ret < 0) {
-       dev_err(bus->dev, "Bank switch failed: %d\n", ret);
+       pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
        goto restore_params;
    }
@@ -1477,7 +1477,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
        /* Program params */
        ret = sdw_program_params(bus, false);
        if (ret < 0) {
-           dev_err(bus->dev, "Program params failed: %d\n", ret);
+           dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
            return ret;
        }
@@ -1497,7 +1497,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
    ret = do_bank_switch(stream);
    if (ret < 0) {
-       dev_err(bus->dev, "Bank switch failed: %d\n", ret);
+       pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
        return ret;
    }
@@ -1567,14 +1567,14 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
        /* Program params */
        ret = sdw_program_params(bus, false);
        if (ret < 0) {
-           dev_err(bus->dev, "Program params failed: %d\n", ret);
+           dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
            return ret;
        }
    }

    ret = do_bank_switch(stream);
    if (ret < 0) {
-       pr_err("Bank switch failed: %d\n", ret);
+       pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
        return ret;
    }
@@ -1664,7 +1664,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
        /* Program params */
        ret = sdw_program_params(bus, false);
        if (ret < 0) {
-           dev_err(bus->dev, "Program params failed: %d\n", ret);
+           dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
            return ret;
        }
    }
@@ -1893,7 +1893,8 @@ int sdw_stream_add_master(struct sdw_bus *bus,
    m_rt = sdw_master_rt_alloc(bus, stream);
    if (!m_rt) {
-       dev_err(bus->dev, "Master runtime alloc failed for stream:%s\n", stream->name);
+       dev_err(bus->dev, "%s: Master runtime alloc failed for stream:%s\n",
+               __func__, stream->name);
        ret = -ENOMEM;
        goto unlock;
    }
@@ -2012,7 +2013,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
     */
    m_rt = sdw_master_rt_alloc(slave->bus, stream);
    if (!m_rt) {
-       dev_err(&slave->dev, "Master runtime alloc failed for stream:%s\n", stream->name);
+       dev_err(&slave->dev, "%s: Master runtime alloc failed for stream:%s\n",
+               __func__, stream->name);
        ret = -ENOMEM;
        goto unlock;
    }
...
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*/
#ifndef __SDW_AMD_H
#define __SDW_AMD_H
#include <linux/soundwire/sdw.h>
/* AMD pm_runtime quirk definitions */
/*
* Force the clock to stop(ClockStopMode0) when suspend callback
* is invoked.
*/
#define AMD_SDW_CLK_STOP_MODE 1
/*
* Stop the bus when runtime suspend/system level suspend callback
* is invoked. If set, a complete bus reset and re-enumeration will
* be performed when the bus restarts. In-band wake interrupts are
* not supported in this mode.
*/
#define AMD_SDW_POWER_OFF_MODE 2
#define ACP_SDW0 0
#define ACP_SDW1 1
struct acp_sdw_pdata {
u16 instance;
/* mutex to protect acp common register access */
struct mutex *acp_sdw_lock;
};
struct sdw_manager_reg_mask {
u32 sw_pad_enable_mask;
u32 sw_pad_pulldown_mask;
u32 acp_sdw_intr_mask;
};
/**
* struct sdw_amd_dai_runtime: AMD sdw dai runtime data
*
* @name: SoundWire stream name
* @stream: stream runtime
* @bus: Bus handle
* @stream_type: Stream type
*/
struct sdw_amd_dai_runtime {
char *name;
struct sdw_stream_runtime *stream;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
};
/**
* struct amd_sdw_manager - amd manager driver context
* @bus: bus handle
* @dev: linux device
* @mmio: SoundWire registers mmio base
* @acp_mmio: acp registers mmio base
* @reg_mask: register mask structure per manager instance
* @amd_sdw_irq_thread: SoundWire manager irq workqueue
* @amd_sdw_work: peripheral status work queue
* @probe_work: SoundWire manager probe workqueue
* @acp_sdw_lock: mutex to protect acp share register access
* @status: peripheral devices status array
* @num_din_ports: number of input ports
* @num_dout_ports: number of output ports
* @cols_index: Column index in frame shape
* @rows_index: Rows index in frame shape
* @instance: SoundWire manager instance
* @quirks: SoundWire manager quirks
* @wake_en_mask: wake enable mask per SoundWire manager
* @clk_stopped: flag set to true when clock is stopped
* @power_mode_mask: flag interprets amd SoundWire manager power mode
* @dai_runtime_array: dai runtime array
*/
struct amd_sdw_manager {
struct sdw_bus bus;
struct device *dev;
void __iomem *mmio;
void __iomem *acp_mmio;
struct sdw_manager_reg_mask *reg_mask;
struct work_struct amd_sdw_irq_thread;
struct work_struct amd_sdw_work;
struct work_struct probe_work;
/* mutex to protect acp common register access */
struct mutex *acp_sdw_lock;
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int num_din_ports;
int num_dout_ports;
int cols_index;
int rows_index;
u32 instance;
u32 quirks;
u32 wake_en_mask;
u32 power_mode_mask;
bool clk_stopped;
struct sdw_amd_dai_runtime **dai_runtime_array;
};
#endif
@@ -309,6 +309,12 @@ struct sdw_intel;
 * @shim_wake: enable/disable in-band wake management
 * @pre_bank_switch: helper for bus management
 * @post_bank_switch: helper for bus management
+ * @sync_arm: helper for multi-link synchronization
+ * @sync_go_unlocked: helper for multi-link synchronization -
+ * shim_lock is assumed to be locked at higher level
+ * @sync_go: helper for multi-link synchronization
+ * @sync_check_cmdsync_unlocked: helper for multi-link synchronization
+ * and bank switch - shim_lock is assumed to be locked at higher level
 */
struct sdw_intel_hw_ops {
    void (*debugfs_init)(struct sdw_intel *sdw);
@@ -330,6 +336,11 @@ struct sdw_intel_hw_ops {
    int (*pre_bank_switch)(struct sdw_intel *sdw);
    int (*post_bank_switch)(struct sdw_intel *sdw);
+
+   void (*sync_arm)(struct sdw_intel *sdw);
+   int (*sync_go_unlocked)(struct sdw_intel *sdw);
+   int (*sync_go)(struct sdw_intel *sdw);
+   bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
};

extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
...