Commit d77b016b authored by Arnd Bergmann's avatar Arnd Bergmann

Merge tag 'scmi-fixes-6.8' of...

Merge tag 'scmi-fixes-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/fixes

Arm SCMI fixes for v6.8

Few fixes addressing the below issues:

1. A spurious IRQ related to the late reply can get wrongly associated
   with the new enqueued request resulting in misinterpretation of data
   in shared memory. This race condition can be detected by looking at
   the channel status bits, which the platform must set to mark the
   channel free before triggering the completion IRQ. Adding a
   consistency check to validate such a condition will fix the issue.
2. Incorrect use of asm-generic/bug.h instead of the generic linux/bug.h
3. xa_store() can't check for possible duplication insertion, use
   xa_insert() instead
4. Fix the SCMI clock protocol version in the v3.2 SCMI specification
5. Incorrect upgrade of highest supported clock protocol version from
   v2.0 to v3.0

* tag 'scmi-fixes-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  firmware: arm_scmi: Fix the clock protocol supported version
  firmware: arm_scmi: Fix the clock protocol version for v3.2
  firmware: arm_scmi: Use xa_insert() when saving raw queues
  firmware: arm_scmi: Use xa_insert() to store opps
  firmware: arm_scmi: Replace asm-generic/bug.h with linux/bug.h
  firmware: arm_scmi: Check mailbox/SMT channel for consistency

Link: https://lore.kernel.org/r/20240122161640.3551085-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents 5e2400f1 6bd1b3fe
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include "notify.h" #include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */ /* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20001 #define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
enum scmi_clock_protocol_cmd { enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3, CLOCK_ATTRIBUTES = 0x3,
...@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph) ...@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
scmi_clock_describe_rates_get(ph, clkid, clk); scmi_clock_describe_rates_get(ph, clkid, clk);
} }
if (PROTOCOL_REV_MAJOR(version) >= 0x2 && if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
PROTOCOL_REV_MINOR(version) >= 0x1) {
cinfo->clock_config_set = scmi_clock_config_set_v2; cinfo->clock_config_set = scmi_clock_config_set_v2;
cinfo->clock_config_get = scmi_clock_config_get_v2; cinfo->clock_config_get = scmi_clock_config_get_v2;
} else { } else {
......
...@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, ...@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem); void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
struct scmi_xfer *xfer); struct scmi_xfer *xfer);
bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
/* declarations for message passing transports */ /* declarations for message passing transports */
struct scmi_msg_payld; struct scmi_msg_payld;
......
...@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m) ...@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
{ {
struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
/*
* An A2P IRQ is NOT valid when received while the platform still has
* the ownership of the channel, because the platform at first releases
* the SMT channel and then sends the completion interrupt.
*
* This addresses a possible race condition in which a spurious IRQ from
* a previous timed-out reply which arrived late could be wrongly
* associated with the next pending transaction.
*/
if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
return;
}
scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
} }
......
...@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx, ...@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
} }
static inline void static inline void
process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp, process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
unsigned int loop_idx, struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r) const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{ {
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
...@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp, ...@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
/* Note that PERF v4 reports always five 32-bit words */ /* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq); opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
if (dom->level_indexing_mode) { if (dom->level_indexing_mode) {
int ret;
opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index); opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL); ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); GFP_KERNEL);
if (ret)
dev_warn(dev,
"Failed to add opps_by_idx at %d - ret:%d\n",
opp->level_index, ret);
ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
if (ret)
dev_warn(dev,
"Failed to add opps_by_lvl at %d - ret:%d\n",
opp->perf, ret);
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq); hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
} }
} }
...@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, ...@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3) if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
process_response_opp(opp, st->loop_idx, response); process_response_opp(opp, st->loop_idx, response);
else else
process_response_opp_v4(p->perf_dom, opp, st->loop_idx, process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
response); response);
p->perf_dom->opp_count++; p->perf_dom->opp_count++;
......
...@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw, ...@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
int i; int i;
for (i = 0; i < num_chans; i++) { for (i = 0; i < num_chans; i++) {
void *xret;
struct scmi_raw_queue *q; struct scmi_raw_queue *q;
q = scmi_raw_queue_init(raw); q = scmi_raw_queue_init(raw);
...@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw, ...@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
goto err_xa; goto err_xa;
} }
xret = xa_store(&raw->chans_q, channels[i], q, ret = xa_insert(&raw->chans_q, channels[i], q,
GFP_KERNEL); GFP_KERNEL);
if (xa_err(xret)) { if (ret) {
dev_err(dev, dev_err(dev,
"Fail to allocate Raw queue 0x%02X\n", "Fail to allocate Raw queue 0x%02X\n",
channels[i]); channels[i]);
ret = xa_err(xret);
goto err_xa; goto err_xa;
} }
} }
...@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer, ...@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
dev = raw->handle->dev; dev = raw->handle->dev;
q = scmi_raw_queue_select(raw, idx, q = scmi_raw_queue_select(raw, idx,
SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0); SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
if (!q) {
dev_warn(dev,
"RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
idx, chan_id);
return;
}
/* /*
* Grab the msg_q_lock upfront to avoid a possible race between * Grab the msg_q_lock upfront to avoid a possible race between
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#include <linux/processor.h> #include <linux/processor.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm-generic/bug.h> #include <linux/bug.h>
#include "common.h" #include "common.h"
...@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem, ...@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR | (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
} }
/*
 * Report whether the platform has released ownership of the SMT channel,
 * i.e. the CHANNEL_FREE bit is set in the shared-memory channel status.
 * Used by transports to reject spurious completion IRQs that arrive while
 * the platform still owns the channel.
 */
bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
{
	return (ioread32(&shmem->channel_status) &
		SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment