Commit b610c55b authored by Arnd Bergmann

Merge tag 'scmi-updates-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/drivers

Arm SCMI firmware interface updates for v5.18

The main additions include:
- Support for an OP-TEE based SCMI transport, to enable using the SCMI
  service provided by OP-TEE on some platforms
- Support for atomic SCMI transports, which enables selected SCMI
  transactions to be completed in atomic context, together with the
  associated refactoring work. It also marks SMC and OP-TEE as atomic
  transports, since their commands are completed by the time the call
  returns.
- Support for polling mode in the SCMI VirtIO transport, in order to
  support atomic operations
- Support for atomic clock operations, based on the availability of
  atomic support in the underlying SCMI transport

Other changes include trace and log enhancements and miscellaneous
bug fixes.
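
To illustrate what the atomic support means for clock consumers, here is a
minimal, hypothetical consumer-side sketch (not part of this series; all
my_* names are illustrative). With an atomic-capable SCMI transport the
clock framework gains .enable/.disable callbacks, so a consumer can gate
its clock from atomic context, keeping only clk_prepare() in sleepable
context:

/* Hypothetical consumer; assumes its clock comes from an SCMI clock
 * provider whose underlying transport supports atomic transactions. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>

static struct clk *my_clk;	/* illustrative: obtained at probe time */

static int my_probe_clk_setup(struct device *dev)
{
	my_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(my_clk))
		return PTR_ERR(my_clk);

	/* Sleepable context: .prepare may issue a blocking SCMI command */
	return clk_prepare(my_clk);
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	/*
	 * Atomic context: legal only because the SCMI clock registered
	 * atomic .enable/.disable ops (atomic transport and a clock
	 * enable_latency within atomic-threshold-us).
	 */
	if (clk_enable(my_clk))
		return IRQ_NONE;
	/* ... program the peripheral ... */
	clk_disable(my_clk);
	return IRQ_HANDLED;
}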

* tag 'scmi-updates-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux: (28 commits)
  clk: scmi: Support atomic clock enable/disable API
  firmware: arm_scmi: Add support for clock_enable_latency
  firmware: arm_scmi: Add atomic support to clock protocol
  firmware: arm_scmi: Support optional system wide atomic-threshold-us
  dt-bindings: firmware: arm,scmi: Add atomic-threshold-us optional property
  firmware: arm_scmi: Add atomic mode support to virtio transport
  firmware: arm_scmi: Review virtio free_list handling
  firmware: arm_scmi: Add a virtio channel refcount
  firmware: arm_scmi: Disable ftrace for Clang Thumb2 builds
  firmware: arm_scmi: Add new parameter to mark_txdone
  firmware: arm_scmi: Add atomic mode support to smc transport
  firmware: arm_scmi: Add support for atomic transports
  firmware: arm_scmi: Make optee support sync_cmds_completed_on_ret
  firmware: arm_scmi: Make smc support sync_cmds_completed_on_ret
  firmware: arm_scmi: Add sync_cmds_completed_on_ret transport flag
  firmware: arm_scmi: Make smc transport use common completions
  firmware: arm_scmi: Add configurable polling mode for transports
  firmware: arm_scmi: Use new trace event scmi_xfer_response_wait
  include: trace: Add new scmi_xfer_response_wait event
  firmware: arm_scmi: Refactor message response path
  ...

Link: https://lore.kernel.org/r/20220222201742.3338589-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents c8812c2a 38a0e5b7
@@ -38,6 +38,9 @@ properties:
The virtio transport only supports a single device.
items:
- const: arm,scmi-virtio
- description: SCMI compliant firmware with OP-TEE transport
items:
- const: linaro,scmi-optee
interrupts:
description:
@@ -78,11 +81,24 @@ properties:
'#size-cells':
const: 0
atomic-threshold-us:
description:
An optional time value, expressed in microseconds, representing on this
platform the threshold above which any SCMI command advertised to have a
higher-than-threshold execution latency should not be considered for
atomic mode of operation, even if requested.
default: 0
arm,smc-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
SMC id required when using smc or hvc transports
linaro,optee-channel-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Channel specifier required when using OP-TEE transport.
protocol@11:
type: object
properties:
@@ -195,6 +211,12 @@ patternProperties:
minItems: 1
maxItems: 2
linaro,optee-channel-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Channel specifier required when using OP-TEE transport and
protocol has a dedicated communication channel.
required:
- reg
@@ -226,6 +248,16 @@ else:
- arm,smc-id
- shmem
else:
if:
properties:
compatible:
contains:
const: linaro,scmi-optee
then:
required:
- linaro,optee-channel-id
examples:
- |
firmware {
@@ -240,6 +272,8 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
atomic-threshold-us = <10000>;
scmi_devpd: protocol@11 {
reg = <0x11>;
#power-domain-cells = <1>;
@@ -340,7 +374,48 @@ examples:
reg = <0x11>;
#power-domain-cells = <1>;
};
};
};
- |
firmware {
scmi {
compatible = "linaro,scmi-optee";
linaro,optee-channel-id = <0>;
#address-cells = <1>;
#size-cells = <0>;
scmi_dvfs1: protocol@13 {
reg = <0x13>;
linaro,optee-channel-id = <1>;
shmem = <&cpu_optee_lpri0>;
#clock-cells = <1>;
};
scmi_clk0: protocol@14 {
reg = <0x14>;
#clock-cells = <1>;
};
};
};
soc {
#address-cells = <2>;
#size-cells = <2>;
sram@51000000 {
compatible = "mmio-sram";
reg = <0x0 0x51000000 0x0 0x10000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x0 0x51000000 0x10000>;
cpu_optee_lpri0: optee-sram-section@0 {
compatible = "arm,scmi-shmem";
reg = <0x0 0x80>;
};
};
};
......
@@ -2,7 +2,7 @@
/*
* System Control and Power Interface (SCMI) Protocol based clock driver
*
* Copyright (C) 2018-2021 ARM Ltd.
* Copyright (C) 2018-2022 ARM Ltd.
*/
#include <linux/clk-provider.h>
@@ -88,21 +88,51 @@ static void scmi_clk_disable(struct clk_hw *hw)
scmi_proto_clk_ops->disable(clk->ph, clk->id);
}
static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
return scmi_proto_clk_ops->enable_atomic(clk->ph, clk->id);
}
static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
scmi_proto_clk_ops->disable_atomic(clk->ph, clk->id);
}
/*
* We can provide enable/disable atomic callbacks only if the underlying SCMI
* transport for an SCMI instance is configured to handle SCMI commands in an
* atomic manner.
*
* When no SCMI atomic transport support is available we instead provide only
* the prepare/unprepare API, as allowed by the clock framework when atomic
* calls are not available.
*
* Two distinct sets of clk_ops are provided since we could have multiple SCMI
* instances with different underlying transport quality, so they cannot be
* shared.
*/
static const struct clk_ops scmi_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
/*
* We can't provide enable/disable callback as we can't perform the same
* in atomic context. Since the clock framework provides standard API
* clk_prepare_enable that helps cases using clk_enable in non-atomic
* context, it should be fine providing prepare/unprepare.
*/
.prepare = scmi_clk_enable,
.unprepare = scmi_clk_disable,
};
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
static const struct clk_ops scmi_atomic_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
.enable = scmi_clk_atomic_enable,
.disable = scmi_clk_atomic_disable,
};
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
const struct clk_ops *scmi_ops)
{
int ret;
unsigned long min_rate, max_rate;
@@ -110,7 +140,7 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
struct clk_init_data init = {
.flags = CLK_GET_RATE_NOCACHE,
.num_parents = 0,
.ops = &scmi_clk_ops,
.ops = scmi_ops,
.name = sclk->info->name,
};
@@ -139,6 +169,8 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk)
static int scmi_clocks_probe(struct scmi_device *sdev)
{
int idx, count, err;
unsigned int atomic_threshold;
bool is_atomic;
struct clk_hw **hws;
struct clk_hw_onecell_data *clk_data;
struct device *dev = &sdev->dev;
@@ -168,8 +200,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
clk_data->num = count;
hws = clk_data->hws;
is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
for (idx = 0; idx < count; idx++) {
struct scmi_clk *sclk;
const struct clk_ops *scmi_ops;
sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
if (!sclk)
@@ -184,13 +219,27 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
sclk->id = idx;
sclk->ph = ph;
err = scmi_clk_ops_init(dev, sclk);
/*
* Note that when the transport is atomic but the SCMI protocol did
* not specify (or does not support) an enable_latency associated
* with a clock, we default to using atomic operations mode.
*/
if (is_atomic &&
sclk->info->enable_latency <= atomic_threshold)
scmi_ops = &scmi_atomic_clk_ops;
else
scmi_ops = &scmi_clk_ops;
err = scmi_clk_ops_init(dev, sclk, scmi_ops);
if (err) {
dev_err(dev, "failed to register clock %d\n", idx);
devm_kfree(dev, sclk);
hws[idx] = NULL;
} else {
dev_dbg(dev, "Registered clock:%s\n", sclk->info->name);
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
scmi_ops == &scmi_atomic_clk_ops ?
" (atomic ops)" : "");
hws[idx] = &sclk->hw;
}
}
......
@@ -54,6 +54,18 @@ config ARM_SCMI_TRANSPORT_MAILBOX
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on mailboxes, answer Y.
config ARM_SCMI_TRANSPORT_OPTEE
bool "SCMI transport based on OP-TEE service"
depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
select ARM_SCMI_HAVE_TRANSPORT
select ARM_SCMI_HAVE_SHMEM
default y
help
This enables the OP-TEE service based transport for SCMI.
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on OP-TEE SCMI service, answer Y.
config ARM_SCMI_TRANSPORT_SMC
bool "SCMI transport based on SMC"
depends on HAVE_ARM_SMCCC_DISCOVERY
@@ -66,6 +78,20 @@ config ARM_SCMI_TRANSPORT_SMC
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on SMC, answer Y.
config ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE
bool "Enable atomic mode support for SCMI SMC transport"
depends on ARM_SCMI_TRANSPORT_SMC
help
Enable support of atomic operation for SCMI SMC based transport.
If you want the SCMI SMC based transport to operate in atomic
mode, avoiding any kind of sleeping behaviour for selected
transactions on the TX path, answer Y.
Enabling atomic mode operations allows any SCMI driver using this
transport to optionally ask for atomic SCMI transactions and operate
in atomic context too, at the price of using a number of busy-waiting
primitives all over instead. If unsure say N.
config ARM_SCMI_TRANSPORT_VIRTIO
bool "SCMI transport based on VirtIO"
depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL
@@ -77,6 +103,36 @@ config ARM_SCMI_TRANSPORT_VIRTIO
If you want the ARM SCMI PROTOCOL stack to include support for a
transport based on VirtIO, answer Y.
config ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
bool "SCMI VirtIO transport Version 1 compliance"
depends on ARM_SCMI_TRANSPORT_VIRTIO
default y
help
This enforces strict compliance with VirtIO Version 1 specification.
If you want the ARM SCMI VirtIO transport layer to refuse to work
with Legacy VirtIO backends and instead support only VirtIO Version 1
devices (or above), answer Y.
If you want instead to also support old legacy VirtIO backends (like
the ones implemented by kvmtool) and let the core kernel VirtIO layer
take care of the needed conversions, say N.
config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
bool "Enable atomic mode for SCMI VirtIO transport"
depends on ARM_SCMI_TRANSPORT_VIRTIO
help
Enable support of atomic operation for SCMI VirtIO based transport.
If you want the SCMI VirtIO based transport to operate in atomic
mode, avoiding any kind of sleeping behaviour for selected
transactions on the TX path, answer Y.
Enabling atomic mode operations allows any SCMI driver using this
transport to optionally ask for atomic SCMI transactions and operate
in atomic context too, at the price of using a number of busy-waiting
primitives all over instead. If unsure say N.
endif #ARM_SCMI_PROTOCOL
config ARM_SCMI_POWER_DOMAIN
......
@@ -6,8 +6,16 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
$(scmi-transport-y)
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
# pointer in Thumb2 mode, which is forcibly enabled by Clang when profiling
# hooks are inserted via the -pg switch.
CFLAGS_REMOVE_smc.o += $(CC_FLAGS_FTRACE)
endif
@@ -27,7 +27,8 @@ struct scmi_msg_resp_clock_protocol_attributes {
struct scmi_msg_resp_clock_attributes {
__le32 attributes;
#define CLOCK_ENABLE BIT(0)
u8 name[SCMI_MAX_STR_SIZE];
u8 name[SCMI_MAX_STR_SIZE];
__le32 clock_enable_latency;
};
struct scmi_clock_set_config {
@@ -116,10 +117,15 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
attr = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret)
if (!ret) {
strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
else
/* Is the optional clock_enable_latency field provided? */
if (t->rx.len == sizeof(*attr))
clk->enable_latency =
le32_to_cpu(attr->clock_enable_latency);
} else {
clk->name[0] = '\0';
}
ph->xops->xfer_put(ph, t);
return ret;
@@ -273,7 +279,7 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
u32 config)
u32 config, bool atomic)
{
int ret;
struct scmi_xfer *t;
@@ -284,6 +290,8 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
if (ret)
return ret;
t->hdr.poll_completion = atomic;
cfg = t->tx.buf;
cfg->id = cpu_to_le32(clk_id);
cfg->attributes = cpu_to_le32(config);
@@ -296,12 +304,24 @@ scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, false);
}
static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, 0);
return scmi_clock_config_set(ph, clk_id, 0, false);
}
static int scmi_clock_enable_atomic(const struct scmi_protocol_handle *ph,
u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE, true);
}
static int scmi_clock_disable_atomic(const struct scmi_protocol_handle *ph,
u32 clk_id)
{
return scmi_clock_config_set(ph, clk_id, 0, true);
}
static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
@@ -330,6 +350,8 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.rate_set = scmi_clock_rate_set,
.enable = scmi_clock_enable,
.disable = scmi_clock_disable,
.enable_atomic = scmi_clock_enable_atomic,
.disable_atomic = scmi_clock_disable_atomic,
};
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
......
@@ -339,11 +339,16 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id);
* @dev: Reference to device in the SCMI hierarchy corresponding to this
* channel
* @handle: Pointer to SCMI entity handle
* @no_completion_irq: Flag to indicate that this channel has no completion
* interrupt mechanism for synchronous commands.
* This can be dynamically set by transports at run-time
* inside their provided .chan_setup().
* @transport_info: Transport layer related information
*/
struct scmi_chan_info {
struct device *dev;
struct scmi_handle *handle;
bool no_completion_irq;
void *transport_info;
};
@@ -373,7 +378,8 @@ struct scmi_transport_ops {
unsigned int (*get_max_msg)(struct scmi_chan_info *base_cinfo);
int (*send_message)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *xfer);
void (*fetch_response)(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer);
void (*fetch_notification)(struct scmi_chan_info *cinfo,
@@ -402,6 +408,18 @@ struct scmi_device *scmi_child_dev_find(struct device *parent,
* be pending simultaneously in the system. May be overridden by the
* get_max_msg op.
* @max_msg_size: Maximum size of data per message that can be handled.
* @force_polling: Flag to force this whole transport to use SCMI core polling
* mechanism instead of completion interrupts even if available.
* @sync_cmds_completed_on_ret: Flag to indicate that the transport assures
* synchronous-command messages are atomically
completed on .send_message: there is no need
to actively poll for a response.
Used internally by the core only when polling
is selected as the wait-for-reply method: i.e.
if a completion IRQ was found, that is used anyway.
* @atomic_enabled: Flag to indicate that this transport, which is assured not
* to sleep anywhere on the TX path, can be used in atomic mode
* when requested.
*/
struct scmi_desc {
int (*transport_init)(void);
@@ -410,6 +428,9 @@ struct scmi_desc {
int max_rx_timeout_ms;
int max_msg;
int max_msg_size;
const bool force_polling;
const bool sync_cmds_completed_on_ret;
const bool atomic_enabled;
};
#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX
@@ -421,6 +442,9 @@ extern const struct scmi_desc scmi_smc_desc;
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO
extern const struct scmi_desc scmi_virtio_desc;
#endif
#ifdef CONFIG_ARM_SCMI_TRANSPORT_OPTEE
extern const struct scmi_desc scmi_optee_desc;
#endif
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);
......
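
To show how a transport advertises the new capabilities, here is a hedged
sketch of a hypothetical transport descriptor (all my_* names are
illustrative; the .ops field is assumed from the existing scmi_desc
layout used by the in-tree transports):

/* Hypothetical transport wiring; a sketch, not part of this series. */
static void my_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			   struct scmi_xfer *xfer)
{
	/* Release the channel only here: with sync_cmds_completed_on_ret
	 * the synchronous response was already fetched by the core. */
}

static const struct scmi_transport_ops my_ops = {
	/* .chan_available, .chan_setup, .send_message, .fetch_response, ... */
	.mark_txdone = my_mark_txdone,
};

const struct scmi_desc my_desc = {
	.ops = &my_ops,			/* assumed field name */
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
	/* Sync commands are fully processed once .send_message returns */
	.sync_cmds_completed_on_ret = true,
	/* The TX path never sleeps, so atomic mode may be requested */
	.atomic_enabled = true,
};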
This diff is collapsed.
@@ -140,7 +140,8 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
return ret;
}
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{
struct scmi_mailbox *smbox = cinfo->transport_info;
......
This diff is collapsed.
@@ -7,6 +7,7 @@
*/
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/processor.h>
#include <linux/slab.h>
#include "common.h"
@@ -23,26 +25,29 @@
*
* @cinfo: SCMI channel info
* @shmem: Transmit/Receive shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area
* @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
* Used when NOT operating in atomic mode.
* @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
* Used when operating in atomic mode.
* @func_id: smc/hvc call function id
* @irq: Optional; employed when the platform indicates msg completion by interrupt.
* @tx_complete: Optional, employed only when irq is valid.
*/
struct scmi_smc {
struct scmi_chan_info *cinfo;
struct scmi_shared_mem __iomem *shmem;
/* Protect access to shmem area */
struct mutex shmem_lock;
#define INFLIGHT_NONE MSG_TOKEN_MAX
atomic_t inflight;
u32 func_id;
int irq;
struct completion tx_complete;
};
static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
struct scmi_smc *scmi_info = data;
complete(&scmi_info->tx_complete);
scmi_rx_callback(scmi_info->cinfo,
shmem_read_header(scmi_info->shmem), NULL);
return IRQ_HANDLED;
}
@@ -57,6 +62,41 @@ static bool smc_chan_available(struct device *dev, int idx)
return true;
}
static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
else
mutex_init(&scmi_info->shmem_lock);
}
static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
{
int ret;
ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);
return ret == INFLIGHT_NONE;
}
static inline void
smc_channel_lock_acquire(struct scmi_smc *scmi_info,
struct scmi_xfer *xfer __maybe_unused)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
else
mutex_lock(&scmi_info->shmem_lock);
}
static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
{
if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
else
mutex_unlock(&scmi_info->shmem_lock);
}
static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
@@ -111,13 +151,13 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
dev_err(dev, "failed to setup SCMI smc irq\n");
return ret;
}
init_completion(&scmi_info->tx_complete);
scmi_info->irq = irq;
} else {
cinfo->no_completion_irq = true;
}
scmi_info->func_id = func_id;
scmi_info->cinfo = cinfo;
mutex_init(&scmi_info->shmem_lock);
smc_channel_lock_init(scmi_info);
cinfo->transport_info = scmi_info;
return 0;
@@ -142,26 +182,22 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
struct scmi_smc *scmi_info = cinfo->transport_info;
struct arm_smccc_res res;
mutex_lock(&scmi_info->shmem_lock);
/*
* The channel will be released only once the response has been
* fully retrieved, i.e. after .mark_txdone()
*/
smc_channel_lock_acquire(scmi_info, xfer);
shmem_tx_prepare(scmi_info->shmem, xfer);
if (scmi_info->irq)
reinit_completion(&scmi_info->tx_complete);
arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res);
if (scmi_info->irq)
wait_for_completion(&scmi_info->tx_complete);
scmi_rx_callback(scmi_info->cinfo,
shmem_read_header(scmi_info->shmem), NULL);
mutex_unlock(&scmi_info->shmem_lock);
/* Only SMCCC_RET_NOT_SUPPORTED is valid error code */
if (res.a0)
if (res.a0) {
smc_channel_lock_release(scmi_info);
return -EOPNOTSUPP;
}
return 0;
}
@@ -173,12 +209,12 @@ static void smc_fetch_response(struct scmi_chan_info *cinfo,
shmem_fetch_response(scmi_info->shmem, xfer);
}
static bool
smc_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
struct scmi_xfer *__unused)
{
struct scmi_smc *scmi_info = cinfo->transport_info;
return shmem_poll_done(scmi_info->shmem, xfer);
smc_channel_lock_release(scmi_info);
}
static const struct scmi_transport_ops scmi_smc_ops = {
@@ -186,8 +222,8 @@ static const struct scmi_transport_ops scmi_smc_ops = {
.chan_setup = smc_chan_setup,
.chan_free = smc_chan_free,
.send_message = smc_send_message,
.mark_txdone = smc_mark_txdone,
.fetch_response = smc_fetch_response,
.poll_done = smc_poll_done,
};
const struct scmi_desc scmi_smc_desc = {
@@ -195,4 +231,14 @@ const struct scmi_desc scmi_smc_desc = {
.max_rx_timeout_ms = 30,
.max_msg = 20,
.max_msg_size = 128,
/*
* Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
* once the SMC instruction has completed successfully, the issued
* SCMI command will have been fully processed by the SCMI
* platform firmware, and so any response value expected
* for the issued command will be immediately ready to be fetched
* from the shared memory area.
*/
.sync_cmds_completed_on_ret = true,
.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
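
The atomic channel "lock" above is a small lock-free idiom worth isolating:
a sentinel value marks the single shared-memory slot free, the winner
installs its transfer's sequence number via cmpxchg, and contenders
busy-wait. A distilled, self-contained sketch of the same claim/release
pattern (illustrative names only):

#include <linux/atomic.h>
#include <linux/processor.h>

#define SLOT_FREE	-1	/* sentinel, like INFLIGHT_NONE above */

static atomic_t slot = ATOMIC_INIT(SLOT_FREE);

/* Try to claim the slot for this token; true only if it was free. */
static bool slot_try_claim(int token)
{
	return atomic_cmpxchg(&slot, SLOT_FREE, token) == SLOT_FREE;
}

static void slot_claim(int token)
{
	/* Busy-wait until we win: no sleeping, safe in atomic context. */
	spin_until_cond(slot_try_claim(token));
}

static void slot_release(void)
{
	atomic_set(&slot, SLOT_FREE);
}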
This diff is collapsed.
@@ -42,6 +42,7 @@ struct scmi_revision_info {
struct scmi_clock_info {
char name[SCMI_MAX_STR_SIZE];
unsigned int enable_latency;
bool rate_discrete;
union {
struct {
@@ -82,6 +83,9 @@ struct scmi_clk_proto_ops {
u64 rate);
int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*disable_atomic)(const struct scmi_protocol_handle *ph,
u32 clk_id);
};
/**
@@ -612,6 +616,15 @@ struct scmi_notify_ops {
* @devm_protocol_get: devres managed method to acquire a protocol and get specific
* operations and a dedicated protocol handler
* @devm_protocol_put: devres managed method to release a protocol
* @is_transport_atomic: method to check if the underlying transport for this
* instance handle is configured to support atomic
* transactions for commands.
* Some upper-layer users of the SCMI stack may want
* to know whether they can assume that SCMI command
* transactions associated with this handle will
* never sleep, and act accordingly.
* An optional atomic threshold value is also
* returned where configured.
* @notify_ops: pointer to set of notifications related operations
*/
struct scmi_handle {
@@ -622,6 +635,8 @@ struct scmi_handle {
(*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
struct scmi_protocol_handle **ph);
void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
bool (*is_transport_atomic)(const struct scmi_handle *handle,
unsigned int *atomic_threshold);
const struct scmi_notify_ops *notify_ops;
};
......
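
A hedged sketch of how an upper-layer SCMI driver could consult the new
handle method at probe time (the clk-scmi changes above do essentially
this; the driver and names below are illustrative):

#include <linux/device.h>
#include <linux/scmi_protocol.h>

static int my_scmi_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	unsigned int atomic_threshold = 0;

	/*
	 * If true, SCMI transactions on this handle never sleep; commands
	 * advertising an execution latency up to atomic_threshold (us)
	 * remain eligible for atomic operation.
	 */
	if (handle->is_transport_atomic(handle, &atomic_threshold))
		dev_info(&sdev->dev, "atomic SCMI, threshold %uus\n",
			 atomic_threshold);

	return 0;
}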
@@ -33,6 +33,34 @@ TRACE_EVENT(scmi_xfer_begin,
__entry->seq, __entry->poll)
);
TRACE_EVENT(scmi_xfer_response_wait,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
u32 timeout, bool poll),
TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll),
TP_STRUCT__entry(
__field(int, transfer_id)
__field(u8, msg_id)
__field(u8, protocol_id)
__field(u16, seq)
__field(u32, timeout)
__field(bool, poll)
),
TP_fast_assign(
__entry->transfer_id = transfer_id;
__entry->msg_id = msg_id;
__entry->protocol_id = protocol_id;
__entry->seq = seq;
__entry->timeout = timeout;
__entry->poll = poll;
),
TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u",
__entry->transfer_id, __entry->msg_id, __entry->protocol_id,
__entry->seq, __entry->timeout, __entry->poll)
);
TRACE_EVENT(scmi_xfer_end,
TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
int status),
......
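
For completeness, a hedged sketch of the kind of call site the core would
use for the new scmi_xfer_response_wait event just before entering its
wait-for-reply path (the real call site is in the refactored response
path, collapsed above; the xfer field names follow the existing
scmi_xfer_begin usage and timeout_ms is a parameter here):

#include <trace/events/scmi.h>

/* Illustrative helper: record the chosen timeout and whether polling
 * was selected, right before waiting for the response. */
static void my_trace_response_wait(struct scmi_xfer *xfer, u32 timeout_ms)
{
	trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq,
				      timeout_ms,
				      xfer->hdr.poll_completion);
}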