Commit a4f7f931 authored by Arnd Bergmann

Merge tag 'scmi-updates-5.19' of...

Merge tag 'scmi-updates-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/drivers

Arm SCMI firmware driver updates/fixes for v5.19

The main theme for most of the changes this time is the addition of support
for the SCMI v3.1 specification. Although one of the major additions in that
specification is the powercap protocol, that part is still a work in progress,
so this set covers the remaining bits and pieces scattered across the
different parts of the specification. A few bugs discovered along the way are
fixed here as well, together with some refactoring that simplifies the
addition of v3.1 support. The v3.1 changes mainly consist of extended name
support, a few newly added notifications, and asynchronous command support.

Apart from the SCMI v3.1 changes, the OP-TEE transport gains support for
dynamic shared memory.

* tag 'scmi-updates-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux: (24 commits)
  firmware: arm_scmi: Fix late checks on pointer dereference
  firmware: arm_scmi: Support optee shared memory in the optee transport
  firmware: arm_scmi: Add SCMI v3.1 VOLTAGE_LEVEL_SET_COMPLETE
  firmware: arm_scmi: Add SCMI v3.1 clock notifications
  firmware: arm_scmi: Add checks for min/max limits in PERFORMANCE_LIMITS_SET
  firmware: arm_scmi: Add SCMI v3.1 perf power-cost in microwatts
  firmware: arm_scmi: Use common iterators in the perf protocol
  firmware: arm_scmi: Use common iterators in the voltage protocol
  firmware: arm_scmi: Use common iterators in the clock protocol
  firmware: arm_scmi: Add SCMI v3.1 SENSOR_AXIS_NAME_GET support
  firmware: arm_scmi: Use common iterators in the sensor protocol
  firmware: arm_scmi: Add iterators for multi-part commands
  firmware: arm_scmi: Parse clock_enable_latency conditionally
  firmware: arm_scmi: Set clock latency to U32_MAX if it is not supported
  firmware: arm_scmi: Add SCMI v3.1 protocol extended names support
  firmware: arm_scmi: Introduce a common SCMI v3.1 .extended_name_get helper
  firmware: arm_scmi: Split protocol specific definitions in a dedicated header
  firmware: arm_scmi: Remove unneeded NULL termination of clk name
  firmware: arm_scmi: Check CLOCK_RATE_SET_COMPLETE async response
  firmware: arm_scmi: Make name_get operations return a const
  ...

Link: https://lore.kernel.org/r/20220504112906.3491985-1-sudeep.holla@arm.com
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parents 91f92d70 c7f8852d
......@@ -59,6 +59,7 @@ config ARM_SCMI_TRANSPORT_OPTEE
depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
select ARM_SCMI_HAVE_TRANSPORT
select ARM_SCMI_HAVE_SHMEM
select ARM_SCMI_HAVE_MSG
default y
help
This enables the OP-TEE service based transport for SCMI.
......
......@@ -178,6 +178,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
__le32 *num_skip, *num_ret;
u32 tot_num_ret = 0, loop_num_ret;
struct device *dev = ph->dev;
struct scmi_revision_info *rev = ph->get_priv(ph);
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
sizeof(*num_skip), 0, &t);
......@@ -189,6 +190,9 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
list = t->rx.buf + sizeof(*num_ret);
do {
size_t real_list_sz;
u32 calc_list_sz;
/* Set the number of protocols to be skipped/already read */
*num_skip = cpu_to_le32(tot_num_ret);
......@@ -197,8 +201,30 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
break;
loop_num_ret = le32_to_cpu(*num_ret);
if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
if (!loop_num_ret)
break;
if (loop_num_ret > rev->num_protocols - tot_num_ret) {
dev_err(dev,
"No. Returned protocols > Total protocols.\n");
break;
}
if (t->rx.len < (sizeof(u32) * 2)) {
dev_err(dev, "Truncated reply - rx.len:%zd\n",
t->rx.len);
ret = -EPROTO;
break;
}
real_list_sz = t->rx.len - sizeof(u32);
calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
sizeof(u32);
if (calc_list_sz != real_list_sz) {
dev_err(dev,
"Malformed reply - real_sz:%zd calc_sz:%u\n",
real_list_sz, calc_list_sz);
ret = -EPROTO;
break;
}
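
As a quick check of the new size calculation above: each implemented protocol
ID occupies one byte of the returned list, padded up to a whole number of
32-bit words, so a reply carrying six protocol IDs gives

	calc_list_sz = (1 + (6 - 1) / sizeof(u32)) * sizeof(u32) = 8 bytes

i.e. two __le32 words, which must match real_list_sz (the payload length minus
the leading num_returned word).
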
......@@ -208,7 +234,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
tot_num_ret += loop_num_ret;
ph->xops->reset_rx_to_maxsz(ph, t);
} while (loop_num_ret);
} while (tot_num_ret < rev->num_protocols);
ph->xops->xfer_put(ph, t);
......@@ -351,15 +377,19 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
if (!prot_imp)
return -ENOMEM;
rev->major_ver = PROTOCOL_REV_MAJOR(version),
rev->minor_ver = PROTOCOL_REV_MINOR(version);
ph->set_priv(ph, rev);
scmi_base_attributes_get(ph);
ret = scmi_base_attributes_get(ph);
if (ret)
return ret;
prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8),
GFP_KERNEL);
if (!prot_imp)
return -ENOMEM;
scmi_base_vendor_id_get(ph, false);
scmi_base_vendor_id_get(ph, true);
scmi_base_implementation_version_get(ph);
......
......@@ -4,7 +4,7 @@
* driver common header file containing some definitions, structures
* and function prototypes used in all the different SCMI protocols.
*
* Copyright (C) 2018-2021 ARM Ltd.
* Copyright (C) 2018-2022 ARM Ltd.
*/
#ifndef _SCMI_COMMON_H
#define _SCMI_COMMON_H
......@@ -24,38 +24,9 @@
#include <asm/unaligned.h>
#include "protocols.h"
#include "notify.h"
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
#define PROTOCOL_REV_MINOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
#define MAX_PROTOCOLS_IMP 16
#define MAX_OPPS 16
enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
};
/**
* struct scmi_msg_resp_prot_version - Response for a message
*
* @minor_version: Minor version of the ABI that firmware supports
* @major_version: Major version of the ABI that firmware supports
*
* In general, ABI version changes follow the rule that minor version increments
* are backward compatible. Major revision changes in ABI may not be
* backward compatible.
*
* Response to a generic message with message type SCMI_MSG_VERSION
*/
struct scmi_msg_resp_prot_version {
__le16 minor_version;
__le16 major_version;
};
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
......@@ -79,28 +50,6 @@ struct scmi_msg_resp_prot_version {
*/
#define SCMI_PENDING_XFERS_HT_ORDER_SZ 9
/**
* struct scmi_msg_hdr - Message(Tx/Rx) header
*
* @id: The identifier of the message being sent
* @protocol_id: The identifier of the protocol used to send @id message
* @type: The SCMI type for this message
* @seq: The token to identify the message. When a message returns, the
* platform returns the whole message header unmodified including the
* token
* @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used
*/
struct scmi_msg_hdr {
u8 id;
u8 protocol_id;
u8 type;
u16 seq;
u32 status;
bool poll_completion;
};
/**
* pack_scmi_header() - packs and returns 32-bit header
*
......@@ -130,72 +79,6 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
hdr->type = MSG_XTRACT_TYPE(msg_hdr);
}
/**
* struct scmi_msg - Message(Tx/Rx) structure
*
* @buf: Buffer pointer
* @len: Length of data in the Buffer
*/
struct scmi_msg {
void *buf;
size_t len;
};
/**
* struct scmi_xfer - Structure representing a message flow
*
* @transfer_id: Unique ID for debug & profiling purpose
* @hdr: Transmit message header
* @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path.
* @done: command message transmit completion event
* @async_done: pointer to delayed response message received event completion
* @pending: True for xfers added to @pending_xfers hashtable
* @node: An hlist_node reference used to store this xfer, alternatively, on
* the free list @free_xfers or in the @pending_xfers hashtable
* @users: A refcount to track the active users for this xfer.
* This is meant to protect against the possibility that, when a command
* transaction times out concurrently with the reception of a valid
* response message, the xfer could be finally put on the TX path, and
* so vanish, while on the RX path scmi_rx_callback() is still
* processing it: in such a case this refcounting will ensure that, even
* though the timed-out transaction will anyway cause the command
* request to be reported as failed by time-out, the underlying xfer
* cannot be discarded and possibly reused until the last one user on
* the RX path has released it.
* @busy: An atomic flag to ensure exclusive write access to this xfer
* @state: The current state of this transfer, with states transitions deemed
* valid being:
* - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
* - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
* (Missing synchronous response is assumed OK and ignored)
* @lock: A spinlock to protect state and busy fields.
* @priv: A pointer for transport private usage.
*/
struct scmi_xfer {
int transfer_id;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;
struct completion done;
struct completion *async_done;
bool pending;
struct hlist_node node;
refcount_t users;
#define SCMI_XFER_FREE 0
#define SCMI_XFER_BUSY 1
atomic_t busy;
#define SCMI_XFER_SENT_OK 0
#define SCMI_XFER_RESP_OK 1
#define SCMI_XFER_DRESP_OK 2
int state;
/* A lock to protect state and busy fields */
spinlock_t lock;
void *priv;
};
/*
* A helper macro to look up an xfer from the @pending_xfers hashtable
* using the message sequence number token as a key.
......@@ -211,64 +94,6 @@ struct scmi_xfer {
xfer_; \
})
struct scmi_xfer_ops;
/**
* struct scmi_protocol_handle - Reference to an initialized protocol instance
*
* @dev: A reference to the associated SCMI instance device (handle->dev).
* @xops: A reference to a struct holding refs to the core xfer operations that
* can be used by the protocol implementation to generate SCMI messages.
* @set_priv: A method to set protocol private data for this instance.
* @get_priv: A method to get protocol private data previously set.
*
* This structure represents a protocol initialized against specific SCMI
* instance and it will be used as follows:
* - as a parameter fed from the core to the protocol initialization code so
* that it can access the core xfer operations to build and generate SCMI
* messages exclusively for the specific underlying protocol instance.
* - as an opaque handle fed by an SCMI driver user when it tries to access
* this protocol through its own protocol operations.
* In this case this handle will be returned as an opaque object together
* with the related protocol operations when the SCMI driver tries to access
* the protocol.
*/
struct scmi_protocol_handle {
struct device *dev;
const struct scmi_xfer_ops *xops;
int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
void *(*get_priv)(const struct scmi_protocol_handle *ph);
};
/**
* struct scmi_xfer_ops - References to the core SCMI xfer operations.
* @version_get: Get this version protocol.
* @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
* @reset_rx_to_maxsz: Reset rx size to max transport size.
* @do_xfer: Do the SCMI transfer.
* @do_xfer_with_response: Do the SCMI transfer waiting for a response.
* @xfer_put: Free the xfer slot.
*
* Note that all these operations expect a protocol handle as their first
* parameter; they then internally use it to infer the underlying protocol
* number: this way it is not possible for a protocol implementation to forge
* messages for another protocol.
*/
struct scmi_xfer_ops {
int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
size_t tx_size, size_t rx_size,
struct scmi_xfer **p);
void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer);
int (*do_xfer)(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer);
int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer);
void (*xfer_put)(const struct scmi_protocol_handle *ph,
struct scmi_xfer *xfer);
};
struct scmi_revision_info *
scmi_revision_area_get(const struct scmi_protocol_handle *ph);
int scmi_handle_put(const struct scmi_handle *handle);
......@@ -277,55 +102,9 @@ void scmi_set_handle(struct scmi_device *scmi_dev);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
/**
* struct scmi_protocol - Protocol descriptor
* @id: Protocol ID.
* @owner: Module reference if any.
* @instance_init: Mandatory protocol initialization function.
* @instance_deinit: Optional protocol de-initialization function.
* @ops: Optional reference to the operations provided by the protocol and
* exposed in scmi_protocol.h.
* @events: An optional reference to the events supported by this protocol.
*/
struct scmi_protocol {
const u8 id;
struct module *owner;
const scmi_prot_init_ph_fn_t instance_init;
const scmi_prot_init_ph_fn_t instance_deinit;
const void *ops;
const struct scmi_protocol_events *events;
};
int __init scmi_bus_init(void);
void __exit scmi_bus_exit(void);
#define DECLARE_SCMI_REGISTER_UNREGISTER(func) \
int __init scmi_##func##_register(void); \
void __exit scmi_##func##_unregister(void)
DECLARE_SCMI_REGISTER_UNREGISTER(base);
DECLARE_SCMI_REGISTER_UNREGISTER(clock);
DECLARE_SCMI_REGISTER_UNREGISTER(perf);
DECLARE_SCMI_REGISTER_UNREGISTER(power);
DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto) \
static const struct scmi_protocol *__this_proto = &(proto); \
\
int __init scmi_##name##_register(void) \
{ \
return scmi_protocol_register(__this_proto); \
} \
\
void __exit scmi_##name##_unregister(void) \
{ \
scmi_protocol_unregister(__this_proto); \
}
const struct scmi_protocol *scmi_protocol_get(int protocol_id);
void scmi_protocol_put(int protocol_id);
......
......@@ -128,7 +128,8 @@ struct scmi_protocol_instance {
* usage.
* @protocols_mtx: A mutex to protect protocols instances initialization.
* @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol
* scmi_revision_info.num_protocols elements allocated by the
* base protocol
* @active_protocols: IDR storing device_nodes for protocols actually defined
* in the DT and confirmed as implemented by fw.
* @atomic_threshold: Optional system wide DT-configured threshold, expressed
......@@ -1102,6 +1103,167 @@ static const struct scmi_xfer_ops xfer_ops = {
.xfer_put = xfer_put,
};
struct scmi_msg_resp_domain_name_get {
__le32 flags;
u8 name[SCMI_MAX_STR_SIZE];
};
/**
* scmi_common_extended_name_get - Common helper to get extended resources name
* @ph: A protocol handle reference.
* @cmd_id: The specific command ID to use.
* @res_id: The specific resource ID to use.
* @name: A pointer to the preallocated area where the retrieved name will be
* stored as a NULL terminated string.
* @len: The length in bytes of the @name char array.
*
* Return: 0 on Success
*/
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
u8 cmd_id, u32 res_id, char *name,
size_t len)
{
int ret;
struct scmi_xfer *t;
struct scmi_msg_resp_domain_name_get *resp;
ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
sizeof(*resp), &t);
if (ret)
goto out;
put_unaligned_le32(res_id, t->tx.buf);
resp = t->rx.buf;
ret = ph->xops->do_xfer(ph, t);
if (!ret)
strscpy(name, resp->name, len);
ph->xops->xfer_put(ph, t);
out:
if (ret)
dev_warn(ph->dev,
"Failed to get extended name - id:%u (ret:%d). Using %s\n",
res_id, ret, name);
return ret;
}
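
This helper is exposed to protocols through the new ph->hops operations; a
minimal sketch of the expected caller pattern, mirroring the perf/power/reset
conversions later in this series (MY_DOMAIN_NAME_GET is a hypothetical
command ID):

	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
	    SUPPORTS_EXTENDED_NAMES(flags))
		ph->hops->extended_name_get(ph, MY_DOMAIN_NAME_GET, domain,
					    dom_info->name, SCMI_MAX_STR_SIZE);
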
/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
* @resp: A reference to the response RX buffer; used by @update_state and
* @process_response to parse the multi-part replies.
* @t: A reference to the underlying xfer initialized and used transparently by
the iterator's internal routines.
* @ph: A reference to the associated protocol handle to be used.
* @ops: A reference to the custom provided iterator operations.
* @state: The current iterator state; used and updated in turn by the iterator's
internal routines and by the caller-provided @scmi_iterator_ops.
* @priv: A reference to optional private data as provided by the caller and
passed back to the @scmi_iterator_ops callbacks.
*/
struct scmi_iterator {
void *msg;
void *resp;
struct scmi_xfer *t;
const struct scmi_protocol_handle *ph;
struct scmi_iterator_ops *ops;
struct scmi_iterator_state state;
void *priv;
};
static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
struct scmi_iterator_ops *ops,
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv)
{
int ret;
struct scmi_iterator *i;
i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
if (!i)
return ERR_PTR(-ENOMEM);
i->ph = ph;
i->ops = ops;
i->priv = priv;
ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
if (ret) {
devm_kfree(ph->dev, i);
return ERR_PTR(ret);
}
i->state.max_resources = max_resources;
i->msg = i->t->tx.buf;
i->resp = i->t->rx.buf;
return i;
}
static int scmi_iterator_run(void *iter)
{
int ret = -EINVAL;
struct scmi_iterator_ops *iops;
const struct scmi_protocol_handle *ph;
struct scmi_iterator_state *st;
struct scmi_iterator *i = iter;
if (!i || !i->ops || !i->ph)
return ret;
iops = i->ops;
ph = i->ph;
st = &i->state;
do {
iops->prepare_message(i->msg, st->desc_index, i->priv);
ret = ph->xops->do_xfer(ph, i->t);
if (ret)
break;
ret = iops->update_state(st, i->resp, i->priv);
if (ret)
break;
if (st->num_returned > st->max_resources - st->desc_index) {
dev_err(ph->dev,
"No. of resources can't exceed %d\n",
st->max_resources);
ret = -EINVAL;
break;
}
for (st->loop_idx = 0; st->loop_idx < st->num_returned;
st->loop_idx++) {
ret = iops->process_response(ph, i->resp, st, i->priv);
if (ret)
goto out;
}
st->desc_index += st->num_returned;
ph->xops->reset_rx_to_maxsz(ph, i->t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (st->num_returned && st->num_remaining);
out:
/* Finalize and destroy iterator */
ph->xops->xfer_put(ph, i->t);
devm_kfree(ph->dev, i);
return ret;
}
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
};
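
Taken together, a protocol converts one of its multi-part commands to these
helpers by providing the three callbacks plus a private context and letting
the core drive the loop. A condensed sketch with hypothetical names
(my_describe_msg, my_describe_resp, MY_DESCRIBE_CMD), following the shape of
the perf and voltage conversions later in this series:

	struct my_describe_msg {
		__le32 domain;
		__le32 index;
	};

	struct my_describe_resp {
		__le16 num_returned;
		__le16 num_remaining;
		__le32 entry[];
	};

	struct my_ipriv {
		u32 domain;
		u32 *out;
	};

	static void iter_my_prepare_message(void *message, unsigned int desc_index,
					    const void *priv)
	{
		struct my_describe_msg *msg = message;
		const struct my_ipriv *p = priv;

		msg->domain = cpu_to_le32(p->domain);
		/* index of the first entry still to be read */
		msg->index = cpu_to_le32(desc_index);
	}

	static int iter_my_update_state(struct scmi_iterator_state *st,
					const void *response, void *priv)
	{
		const struct my_describe_resp *r = response;

		st->num_returned = le16_to_cpu(r->num_returned);
		st->num_remaining = le16_to_cpu(r->num_remaining);

		return 0;
	}

	static int iter_my_process_response(const struct scmi_protocol_handle *ph,
					    const void *response,
					    struct scmi_iterator_state *st,
					    void *priv)
	{
		const struct my_describe_resp *r = response;
		struct my_ipriv *p = priv;

		/* store entry st->loop_idx of the current multi-part reply */
		p->out[st->desc_index + st->loop_idx] =
			le32_to_cpu(r->entry[st->loop_idx]);

		return 0;
	}

	static int my_describe_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *out, unsigned int max_entries)
	{
		void *iter;
		struct scmi_iterator_ops ops = {
			.prepare_message = iter_my_prepare_message,
			.update_state = iter_my_update_state,
			.process_response = iter_my_process_response,
		};
		struct my_ipriv p = { .domain = domain, .out = out };

		iter = ph->hops->iter_response_init(ph, &ops, max_entries,
						    MY_DESCRIBE_CMD,
						    sizeof(struct my_describe_msg),
						    &p);
		if (IS_ERR(iter))
			return PTR_ERR(iter);

		return ph->hops->iter_response_run(iter);
	}
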
/**
* scmi_revision_area_get - Retrieve version memory area.
*
......@@ -1162,6 +1324,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
pi->handle = handle;
pi->ph.dev = handle->dev;
pi->ph.xops = &xfer_ops;
pi->ph.hops = &helpers_ops;
pi->ph.set_priv = scmi_set_protocol_priv;
pi->ph.get_priv = scmi_get_protocol_priv;
refcount_set(&pi->users, 1);
......@@ -1310,11 +1473,12 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
int i;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_revision_info *rev = handle->version;
if (!info->protocols_imp)
return false;
for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
for (i = 0; i < rev->num_protocols; i++)
if (info->protocols_imp[i] == prot_id)
return true;
return false;
......
......@@ -64,6 +64,22 @@ enum scmi_optee_pta_cmd {
* [in] value[0].b: Requested capabilities mask (enum pta_scmi_caps)
*/
PTA_SCMI_CMD_GET_CHANNEL = 3,
/*
* PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG
* buffer pointed by memref parameters
*
* [in] value[0].a: Channel handle
* [in] memref[1]: Message buffer (MSG and SCMI payload)
* [out] memref[2]: Response buffer (MSG and SCMI payload)
*
* Shared memories used for SCMI message/response are MSG buffers
* referenced by param[1] and param[2]. MSG transport protocol
* uses a 32bit header to carry SCMI meta-data (protocol ID and
* protocol message ID) followed by the effective SCMI message
* payload.
*/
PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4,
};
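
For reference, the MSG framing referred to above is just the standard packed
32-bit SCMI message header followed by the raw payload. An illustrative sketch
of the layout carried in the memref parameters (the real struct scmi_msg_payld
and the msg_tx_prepare()/msg_fetch_response() helpers used further down live
in the shared msg.c and are assumed to follow this shape):

	struct example_msg_payld {
		__le32 msg_header;	/* message ID, type, protocol ID and token */
		__le32 msg_payload[];	/* raw SCMI command or response payload */
	};
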
/*
......@@ -72,9 +88,17 @@ enum scmi_optee_pta_cmd {
* PTA_SCMI_CAPS_SMT_HEADER
* When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in
* shared memory buffers to carry SCMI protocol synchronisation information.
*
* PTA_SCMI_CAPS_MSG_HEADER
* When set, OP-TEE supports command using MSG header protocol in an OP-TEE
* shared memory to carry SCMI protocol synchronisation information and SCMI
* message payload.
*/
#define PTA_SCMI_CAPS_NONE 0
#define PTA_SCMI_CAPS_SMT_HEADER BIT(0)
#define PTA_SCMI_CAPS_MSG_HEADER BIT(1)
#define PTA_SCMI_CAPS_MASK (PTA_SCMI_CAPS_SMT_HEADER | \
PTA_SCMI_CAPS_MSG_HEADER)
/**
* struct scmi_optee_channel - Description of an OP-TEE SCMI channel
......@@ -85,7 +109,8 @@ enum scmi_optee_pta_cmd {
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
* @shmem: Virtual base address of the shared memory
* @tee_shm: Reference to TEE shared memory or NULL if using static shmem
* @req: Shared memory protocol handle for SCMI request and synchronous response
* @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
struct scmi_optee_channel {
......@@ -94,7 +119,10 @@ struct scmi_optee_channel {
u32 caps;
struct mutex mu;
struct scmi_chan_info *cinfo;
union {
struct scmi_shared_mem __iomem *shmem;
struct scmi_msg_payld *msg;
} req;
struct tee_shm *tee_shm;
struct list_head link;
};
......@@ -178,8 +206,8 @@ static int get_capabilities(struct scmi_optee_agent *agent)
caps = param[0].u.value.a;
if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) {
dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n");
if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) {
dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n");
return -EOPNOTSUPP;
}
......@@ -193,9 +221,14 @@ static int get_channel(struct scmi_optee_channel *channel)
struct device *dev = scmi_optee_private->dev;
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[1] = { };
unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER;
unsigned int caps = 0;
int ret;
if (channel->tee_shm)
caps = PTA_SCMI_CAPS_MSG_HEADER;
else
caps = PTA_SCMI_CAPS_SMT_HEADER;
arg.func = PTA_SCMI_CMD_GET_CHANNEL;
arg.session = channel->tee_session;
arg.num_params = 1;
......@@ -220,25 +253,48 @@ static int get_channel(struct scmi_optee_channel *channel)
static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
{
struct tee_ioctl_invoke_arg arg = { };
struct tee_param param[2] = { };
struct tee_ioctl_invoke_arg arg = {
.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL,
.session = channel->tee_session,
.num_params = 1,
};
struct tee_param param[1] = { };
int ret;
arg.session = channel->tee_session;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
if (channel->tee_shm) {
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
param[1].u.memref.shm = channel->tee_shm;
param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
arg.num_params = 2;
arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE;
} else {
arg.num_params = 1;
arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
channel->channel_id, ret, arg.ret);
return -EIO;
}
return 0;
}
static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size)
{
struct tee_ioctl_invoke_arg arg = {
.func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL,
.session = channel->tee_session,
.num_params = 3,
};
struct tee_param param[3] = { };
int ret;
param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
param[0].u.value.a = channel->channel_id;
param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
param[1].u.memref.shm = channel->tee_shm;
param[1].u.memref.size = msg_size;
param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
param[2].u.memref.shm = channel->tee_shm;
param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
if (ret < 0 || arg.ret) {
dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
......@@ -279,7 +335,26 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
shmem_clear_channel(channel->shmem);
if (!channel->tee_shm)
shmem_clear_channel(channel->req.shmem);
}
static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
{
const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
void *shbuf;
channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
if (IS_ERR(channel->tee_shm)) {
dev_err(channel->cinfo->dev, "shmem allocation failed\n");
return -ENOMEM;
}
shbuf = tee_shm_get_va(channel->tee_shm, 0);
memset(shbuf, 0, msg_size);
channel->req.msg = shbuf;
return 0;
}
static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
......@@ -304,8 +379,8 @@ static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
size = resource_size(&res);
channel->shmem = devm_ioremap(dev, res.start, size);
if (!channel->shmem) {
channel->req.shmem = devm_ioremap(dev, res.start, size);
if (!channel->req.shmem) {
dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
ret = -EADDRNOTAVAIL;
goto out;
......@@ -325,7 +400,7 @@ static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
return setup_static_shmem(dev, cinfo, channel);
else
return -ENOMEM;
return setup_dynamic_shmem(dev, channel);
}
static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
......@@ -405,27 +480,22 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
return 0;
}
static struct scmi_shared_mem __iomem *
get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
{
if (!chan)
return NULL;
return chan->shmem;
}
static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
int ret;
mutex_lock(&channel->mu);
shmem_tx_prepare(shmem, xfer);
if (channel->tee_shm) {
msg_tx_prepare(channel->req.msg, xfer);
ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
} else {
shmem_tx_prepare(channel->req.shmem, xfer);
ret = invoke_process_smt_channel(channel);
}
if (ret)
mutex_unlock(&channel->mu);
......@@ -436,9 +506,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
shmem_fetch_response(shmem, xfer);
if (channel->tee_shm)
msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer);
else
shmem_fetch_response(channel->req.shmem, xfer);
}
static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
......
......@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Performance Protocol
*
* Copyright (C) 2018-2021 ARM Ltd.
* Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
......@@ -17,9 +17,11 @@
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include "common.h"
#include "protocols.h"
#include "notify.h"
#define MAX_OPPS 16
enum scmi_performance_protocol_cmd {
PERF_DOMAIN_ATTRIBUTES = 0x3,
PERF_DESCRIBE_LEVELS = 0x4,
......@@ -30,6 +32,7 @@ enum scmi_performance_protocol_cmd {
PERF_NOTIFY_LIMITS = 0x9,
PERF_NOTIFY_LEVEL = 0xa,
PERF_DESCRIBE_FASTCHANNEL = 0xb,
PERF_DOMAIN_NAME_GET = 0xc,
};
struct scmi_opp {
......@@ -42,6 +45,7 @@ struct scmi_msg_resp_perf_attributes {
__le16 num_domains;
__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x) ((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x) ((x) & BIT(1))
__le32 stats_addr_low;
__le32 stats_addr_high;
__le32 stats_size;
......@@ -54,10 +58,11 @@ struct scmi_msg_resp_perf_domain_attributes {
#define SUPPORTS_PERF_LIMIT_NOTIFY(x) ((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26))
__le32 rate_limit_us;
__le32 sustained_freq_khz;
__le32 sustained_perf_level;
u8 name[SCMI_MAX_STR_SIZE];
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_perf_describe_levels {
......@@ -166,6 +171,7 @@ struct scmi_perf_info {
u32 version;
int num_domains;
bool power_scale_mw;
bool power_scale_uw;
u64 stats_addr;
u32 stats_size;
struct perf_dom_info *dom_info;
......@@ -196,6 +202,8 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
pi->num_domains = le16_to_cpu(attr->num_domains);
pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags);
pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
(u64)le32_to_cpu(attr->stats_addr_high) << 32;
pi->stats_size = le32_to_cpu(attr->stats_size);
......@@ -207,9 +215,11 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct perf_dom_info *dom_info)
u32 domain, struct perf_dom_info *dom_info,
u32 version)
{
int ret;
u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_perf_domain_attributes *attr;
......@@ -223,7 +233,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
flags = le32_to_cpu(attr->flags);
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
......@@ -246,6 +256,16 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags))
ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain,
dom_info->name, SCMI_MAX_STR_SIZE);
return ret;
}
......@@ -256,66 +276,87 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
return t1->perf - t2->perf;
}
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
struct perf_dom_info *perf_dom)
struct scmi_perf_ipriv {
u32 domain;
struct perf_dom_info *perf_dom;
};
static void iter_perf_levels_prepare_message(void *message,
unsigned int desc_index,
const void *priv)
{
int ret, cnt;
u32 tot_opp_cnt = 0;
u16 num_returned, num_remaining;
struct scmi_xfer *t;
struct scmi_opp *opp;
struct scmi_msg_perf_describe_levels *dom_info;
struct scmi_msg_resp_perf_describe_levels *level_info;
struct scmi_msg_perf_describe_levels *msg = message;
const struct scmi_perf_ipriv *p = priv;
ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
sizeof(*dom_info), 0, &t);
if (ret)
return ret;
msg->domain = cpu_to_le32(p->domain);
/* Set the number of OPPs to be skipped/already read */
msg->level_index = cpu_to_le32(desc_index);
}
dom_info = t->tx.buf;
level_info = t->rx.buf;
static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
const struct scmi_msg_resp_perf_describe_levels *r = response;
do {
dom_info->domain = cpu_to_le32(domain);
/* Set the number of OPPs to be skipped/already read */
dom_info->level_index = cpu_to_le32(tot_opp_cnt);
st->num_returned = le16_to_cpu(r->num_returned);
st->num_remaining = le16_to_cpu(r->num_remaining);
ret = ph->xops->do_xfer(ph, t);
if (ret)
break;
return 0;
}
num_returned = le16_to_cpu(level_info->num_returned);
num_remaining = le16_to_cpu(level_info->num_remaining);
if (tot_opp_cnt + num_returned > MAX_OPPS) {
dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
break;
}
static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
struct scmi_opp *opp;
const struct scmi_msg_resp_perf_describe_levels *r = response;
struct scmi_perf_ipriv *p = priv;
opp = &perf_dom->opp[tot_opp_cnt];
for (cnt = 0; cnt < num_returned; cnt++, opp++) {
opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
opp->power = le32_to_cpu(level_info->opp[cnt].power);
opp->trans_latency_us = le16_to_cpu
(level_info->opp[cnt].transition_latency_us);
opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
p->perf_dom->opp_count++;
dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
opp->perf, opp->power, opp->trans_latency_us);
}
tot_opp_cnt += num_returned;
return 0;
}
ph->xops->reset_rx_to_maxsz(ph, t);
/*
* check for both returned and remaining to avoid infinite
* loop due to buggy firmware
*/
} while (num_returned && num_remaining);
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
struct perf_dom_info *perf_dom)
{
int ret;
void *iter;
struct scmi_msg_perf_describe_levels *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_perf_levels_prepare_message,
.update_state = iter_perf_levels_update_state,
.process_response = iter_perf_levels_process_response,
};
struct scmi_perf_ipriv ppriv = {
.domain = domain,
.perf_dom = perf_dom,
};
iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
PERF_DESCRIBE_LEVELS,
sizeof(*msg), &ppriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
if (ret)
return ret;
perf_dom->opp_count = tot_opp_cnt;
ph->xops->xfer_put(ph, t);
if (perf_dom->opp_count)
sort(perf_dom->opp, perf_dom->opp_count,
sizeof(struct scmi_opp), opp_cmp_func, NULL);
sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
return ret;
}
......@@ -382,6 +423,9 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
struct scmi_perf_info *pi = ph->get_priv(ph);
struct perf_dom_info *dom = pi->dom_info + domain;
if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
return -EINVAL;
if (dom->fc_info && dom->fc_info->limit_set_addr) {
iowrite32(max_perf, dom->fc_info->limit_set_addr);
iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
......@@ -873,11 +917,13 @@ static const struct scmi_protocol_events perf_protocol_events = {
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
int domain, ret;
u32 version;
struct scmi_perf_info *pinfo;
ph->xops->version_get(ph, &version);
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Performance Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
......@@ -886,7 +932,9 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
scmi_perf_attributes_get(ph, pinfo);
ret = scmi_perf_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
......@@ -896,7 +944,7 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct perf_dom_info *dom = pinfo->dom_info + domain;
scmi_perf_domain_attributes_get(ph, domain, dom);
scmi_perf_domain_attributes_get(ph, domain, dom, version);
scmi_perf_describe_levels_get(ph, domain, dom);
if (dom->perf_fastchannels)
......
......@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Power Protocol
*
* Copyright (C) 2018-2021 ARM Ltd.
* Copyright (C) 2018-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
......@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "protocols.h"
#include "notify.h"
enum scmi_power_protocol_cmd {
......@@ -18,6 +18,7 @@ enum scmi_power_protocol_cmd {
POWER_STATE_SET = 0x4,
POWER_STATE_GET = 0x5,
POWER_STATE_NOTIFY = 0x6,
POWER_DOMAIN_NAME_GET = 0x8,
};
struct scmi_msg_resp_power_attributes {
......@@ -33,7 +34,8 @@ struct scmi_msg_resp_power_domain_attributes {
#define SUPPORTS_STATE_SET_NOTIFY(x) ((x) & BIT(31))
#define SUPPORTS_STATE_SET_ASYNC(x) ((x) & BIT(30))
#define SUPPORTS_STATE_SET_SYNC(x) ((x) & BIT(29))
u8 name[SCMI_MAX_STR_SIZE];
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(27))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_power_set_state {
......@@ -97,9 +99,11 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
static int
scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct power_dom_info *dom_info)
u32 domain, struct power_dom_info *dom_info,
u32 version)
{
int ret;
u32 flags;
struct scmi_xfer *t;
struct scmi_msg_resp_power_domain_attributes *attr;
......@@ -113,15 +117,26 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 flags = le32_to_cpu(attr->flags);
flags = le32_to_cpu(attr->flags);
dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
ph->xops->xfer_put(ph, t);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(flags)) {
ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,
domain, dom_info->name,
SCMI_MAX_STR_SIZE);
}
return ret;
}
......@@ -174,7 +189,8 @@ static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
return pi->num_domains;
}
static char *scmi_power_name_get(const struct scmi_protocol_handle *ph,
static const char *
scmi_power_name_get(const struct scmi_protocol_handle *ph,
u32 domain)
{
struct scmi_power_info *pi = ph->get_priv(ph);
......@@ -280,11 +296,13 @@ static const struct scmi_protocol_events power_protocol_events = {
static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
int domain, ret;
u32 version;
struct scmi_power_info *pinfo;
ph->xops->version_get(ph, &version);
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
......@@ -293,7 +311,9 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
scmi_power_attributes_get(ph, pinfo);
ret = scmi_power_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
......@@ -303,7 +323,7 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
scmi_power_domain_attributes_get(ph, domain, dom);
scmi_power_domain_attributes_get(ph, domain, dom, version);
}
pinfo->version = version;
......
......@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) Reset Protocol
*
* Copyright (C) 2019-2021 ARM Ltd.
* Copyright (C) 2019-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
......@@ -10,13 +10,14 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "protocols.h"
#include "notify.h"
enum scmi_reset_protocol_cmd {
RESET_DOMAIN_ATTRIBUTES = 0x3,
RESET = 0x4,
RESET_NOTIFY = 0x5,
RESET_DOMAIN_NAME_GET = 0x6,
};
#define NUM_RESET_DOMAIN_MASK 0xffff
......@@ -26,8 +27,9 @@ struct scmi_msg_resp_reset_domain_attributes {
__le32 attributes;
#define SUPPORTS_ASYNC_RESET(x) ((x) & BIT(31))
#define SUPPORTS_NOTIFY_RESET(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
__le32 latency;
u8 name[SCMI_MAX_STR_SIZE];
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_reset_domain_reset {
......@@ -89,9 +91,11 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
static int
scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct reset_dom_info *dom_info)
u32 domain, struct reset_dom_info *dom_info,
u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
......@@ -105,7 +109,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
u32 attributes = le32_to_cpu(attr->attributes);
attributes = le32_to_cpu(attr->attributes);
dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
......@@ -116,6 +120,16 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,
dom_info->name, SCMI_MAX_STR_SIZE);
return ret;
}
......@@ -126,8 +140,8 @@ static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
return pi->num_domains;
}
static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph,
u32 domain)
static const char *
scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain)
{
struct scmi_reset_info *pi = ph->get_priv(ph);
......@@ -293,11 +307,13 @@ static const struct scmi_protocol_events reset_protocol_events = {
static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
{
int domain;
int domain, ret;
u32 version;
struct scmi_reset_info *pinfo;
ph->xops->version_get(ph, &version);
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "Reset Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
......@@ -306,7 +322,9 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
scmi_reset_attributes_get(ph, pinfo);
ret = scmi_reset_attributes_get(ph, pinfo);
if (ret)
return ret;
pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
sizeof(*pinfo->dom_info), GFP_KERNEL);
......@@ -316,7 +334,7 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct reset_dom_info *dom = pinfo->dom_info + domain;
scmi_reset_domain_attributes_get(ph, domain, dom);
scmi_reset_domain_attributes_get(ph, domain, dom, version);
}
pinfo->version = version;
......
......@@ -2,7 +2,7 @@
/*
* System Control and Management Interface (SCMI) System Power Protocol
*
* Copyright (C) 2020-2021 ARM Ltd.
* Copyright (C) 2020-2022 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
......@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "protocols.h"
#include "notify.h"
#define SCMI_SYSTEM_NUM_SOURCES 1
......@@ -113,10 +113,13 @@ static const struct scmi_protocol_events system_protocol_events = {
static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
{
int ret;
u32 version;
struct scmi_system_info *pinfo;
ph->xops->version_get(ph, &version);
ret = ph->xops->version_get(ph, &version);
if (ret)
return ret;
dev_dbg(ph->dev, "System Power Version %d.%d\n",
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
......
......@@ -2,13 +2,13 @@
/*
* System Control and Management Interface (SCMI) Voltage Protocol
*
* Copyright (C) 2020-2021 ARM Ltd.
* Copyright (C) 2020-2022 ARM Ltd.
*/
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "protocols.h"
#define VOLTAGE_DOMS_NUM_MASK GENMASK(15, 0)
#define REMAINING_LEVELS_MASK GENMASK(31, 16)
......@@ -21,13 +21,16 @@ enum scmi_voltage_protocol_cmd {
VOLTAGE_CONFIG_GET = 0x6,
VOLTAGE_LEVEL_SET = 0x7,
VOLTAGE_LEVEL_GET = 0x8,
VOLTAGE_DOMAIN_NAME_GET = 0x09,
};
#define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x))))
struct scmi_msg_resp_domain_attributes {
__le32 attr;
u8 name[SCMI_MAX_STR_SIZE];
#define SUPPORTS_ASYNC_LEVEL_SET(x) ((x) & BIT(31))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(30))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_msg_cmd_describe_levels {
......@@ -54,6 +57,11 @@ struct scmi_msg_cmd_level_set {
__le32 voltage_level;
};
struct scmi_resp_voltage_level_set_complete {
__le32 domain_id;
__le32 voltage_level;
};
struct voltage_info {
unsigned int version;
unsigned int num_domains;
......@@ -110,14 +118,100 @@ static int scmi_init_voltage_levels(struct device *dev,
return 0;
}
struct scmi_volt_ipriv {
struct device *dev;
struct scmi_voltage_info *v;
};
static void iter_volt_levels_prepare_message(void *message,
unsigned int desc_index,
const void *priv)
{
struct scmi_msg_cmd_describe_levels *msg = message;
const struct scmi_volt_ipriv *p = priv;
msg->domain_id = cpu_to_le32(p->v->id);
msg->level_index = cpu_to_le32(desc_index);
}
static int iter_volt_levels_update_state(struct scmi_iterator_state *st,
const void *response, void *priv)
{
int ret = 0;
u32 flags;
const struct scmi_msg_resp_describe_levels *r = response;
struct scmi_volt_ipriv *p = priv;
flags = le32_to_cpu(r->flags);
st->num_returned = NUM_RETURNED_LEVELS(flags);
st->num_remaining = NUM_REMAINING_LEVELS(flags);
/* Allocate space for num_levels if not already done */
if (!p->v->num_levels) {
ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned,
st->num_remaining,
SUPPORTS_SEGMENTED_LEVELS(flags));
if (!ret)
st->max_resources = p->v->num_levels;
}
return ret;
}
static int
iter_volt_levels_process_response(const struct scmi_protocol_handle *ph,
const void *response,
struct scmi_iterator_state *st, void *priv)
{
s32 val;
const struct scmi_msg_resp_describe_levels *r = response;
struct scmi_volt_ipriv *p = priv;
val = (s32)le32_to_cpu(r->voltage[st->loop_idx]);
p->v->levels_uv[st->desc_index + st->loop_idx] = val;
if (val < 0)
p->v->negative_volts_allowed = true;
return 0;
}
static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
struct scmi_voltage_info *v)
{
int ret;
void *iter;
struct scmi_msg_cmd_describe_levels *msg;
struct scmi_iterator_ops ops = {
.prepare_message = iter_volt_levels_prepare_message,
.update_state = iter_volt_levels_update_state,
.process_response = iter_volt_levels_process_response,
};
struct scmi_volt_ipriv vpriv = {
.dev = ph->dev,
.v = v,
};
iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
VOLTAGE_DESCRIBE_LEVELS,
sizeof(*msg), &vpriv);
if (IS_ERR(iter))
return PTR_ERR(iter);
ret = ph->hops->iter_response_run(iter);
if (ret) {
v->num_levels = 0;
devm_kfree(ph->dev, v->levels_uv);
}
return ret;
}
static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
struct voltage_info *vinfo)
{
int ret, dom;
struct scmi_xfer *td, *tl;
struct device *dev = ph->dev;
struct scmi_xfer *td;
struct scmi_msg_resp_domain_attributes *resp_dom;
struct scmi_msg_resp_describe_levels *resp_levels;
ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
sizeof(__le32), sizeof(*resp_dom), &td);
......@@ -125,16 +219,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
return ret;
resp_dom = td->rx.buf;
ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS,
sizeof(__le64), 0, &tl);
if (ret)
goto outd;
resp_levels = tl->rx.buf;
for (dom = 0; dom < vinfo->num_domains; dom++) {
u32 desc_index = 0;
u16 num_returned = 0, num_remaining = 0;
struct scmi_msg_cmd_describe_levels *cmd;
u32 attributes;
struct scmi_voltage_info *v;
/* Retrieve domain attributes at first ... */
......@@ -146,69 +232,31 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
v = vinfo->domains + dom;
v->id = dom;
v->attributes = le32_to_cpu(resp_dom->attr);
attributes = le32_to_cpu(resp_dom->attr);
strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE);
cmd = tl->tx.buf;
/* ...then retrieve domain levels descriptions */
do {
u32 flags;
int cnt;
cmd->domain_id = cpu_to_le32(v->id);
cmd->level_index = cpu_to_le32(desc_index);
ret = ph->xops->do_xfer(ph, tl);
if (ret)
break;
flags = le32_to_cpu(resp_levels->flags);
num_returned = NUM_RETURNED_LEVELS(flags);
num_remaining = NUM_REMAINING_LEVELS(flags);
/* Allocate space for num_levels if not already done */
if (!v->num_levels) {
ret = scmi_init_voltage_levels(dev, v,
num_returned,
num_remaining,
SUPPORTS_SEGMENTED_LEVELS(flags));
if (ret)
break;
}
if (desc_index + num_returned > v->num_levels) {
dev_err(ph->dev,
"No. of voltage levels can't exceed %d\n",
v->num_levels);
ret = -EINVAL;
break;
}
for (cnt = 0; cnt < num_returned; cnt++) {
s32 val;
val =
(s32)le32_to_cpu(resp_levels->voltage[cnt]);
v->levels_uv[desc_index + cnt] = val;
if (val < 0)
v->negative_volts_allowed = true;
/*
* If supported overwrite short name with the extended one;
* on error just carry on and use already provided short name.
*/
if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) {
if (SUPPORTS_EXTENDED_NAMES(attributes))
ph->hops->extended_name_get(ph,
VOLTAGE_DOMAIN_NAME_GET,
v->id, v->name,
SCMI_MAX_STR_SIZE);
if (SUPPORTS_ASYNC_LEVEL_SET(attributes))
v->async_level_set = true;
}
desc_index += num_returned;
ph->xops->reset_rx_to_maxsz(ph, tl);
/* check both to avoid infinite loop due to buggy fw */
} while (num_returned && num_remaining);
if (ret) {
v->num_levels = 0;
devm_kfree(dev, v->levels_uv);
}
ret = scmi_voltage_levels_get(ph, v);
/* Skip invalid voltage descriptors */
if (ret)
continue;
ph->xops->reset_rx_to_maxsz(ph, td);
}
ph->xops->xfer_put(ph, tl);
outd:
ph->xops->xfer_put(ph, td);
return ret;
......@@ -271,12 +319,15 @@ static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
}
static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
u32 domain_id, u32 flags, s32 volt_uV)
u32 domain_id,
enum scmi_voltage_level_mode mode,
s32 volt_uV)
{
int ret;
struct scmi_xfer *t;
struct voltage_info *vinfo = ph->get_priv(ph);
struct scmi_msg_cmd_level_set *cmd;
struct scmi_voltage_info *v;
if (domain_id >= vinfo->num_domains)
return -EINVAL;
......@@ -286,12 +337,31 @@ static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
if (ret)
return ret;
v = vinfo->domains + domain_id;
cmd = t->tx.buf;
cmd->domain_id = cpu_to_le32(domain_id);
cmd->flags = cpu_to_le32(flags);
cmd->voltage_level = cpu_to_le32(volt_uV);
if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) {
cmd->flags = cpu_to_le32(0x0);
ret = ph->xops->do_xfer(ph, t);
} else {
cmd->flags = cpu_to_le32(0x1);
ret = ph->xops->do_xfer_with_response(ph, t);
if (!ret) {
struct scmi_resp_voltage_level_set_complete *resp;
resp = t->rx.buf;
if (le32_to_cpu(resp->domain_id) == domain_id)
dev_dbg(ph->dev,
"Voltage domain %d set async to %d\n",
v->id,
le32_to_cpu(resp->voltage_level));
else
ret = -EPROTO;
}
}
ph->xops->xfer_put(ph, t);
return ret;
......
......@@ -13,7 +13,7 @@
#include <linux/notifier.h>
#include <linux/types.h>
#define SCMI_MAX_STR_SIZE 16
#define SCMI_MAX_STR_SIZE 64
#define SCMI_MAX_NUM_RATES 16
/**
......@@ -44,6 +44,8 @@ struct scmi_clock_info {
char name[SCMI_MAX_STR_SIZE];
unsigned int enable_latency;
bool rate_discrete;
bool rate_changed_notifications;
bool rate_change_requested_notifications;
union {
struct {
int num_rates;
......@@ -146,7 +148,8 @@ struct scmi_perf_proto_ops {
*/
struct scmi_power_proto_ops {
int (*num_domains_get)(const struct scmi_protocol_handle *ph);
char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain);
const char *(*name_get)(const struct scmi_protocol_handle *ph,
u32 domain);
#define SCMI_POWER_STATE_TYPE_SHIFT 30
#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
#define SCMI_POWER_STATE_PARAM(type, id) \
......@@ -484,13 +487,19 @@ struct scmi_sensor_proto_ops {
*/
struct scmi_reset_proto_ops {
int (*num_domains_get)(const struct scmi_protocol_handle *ph);
char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain);
const char *(*name_get)(const struct scmi_protocol_handle *ph,
u32 domain);
int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain);
int (*reset)(const struct scmi_protocol_handle *ph, u32 domain);
int (*assert)(const struct scmi_protocol_handle *ph, u32 domain);
int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain);
};
enum scmi_voltage_level_mode {
SCMI_VOLTAGE_LEVEL_SET_AUTO,
SCMI_VOLTAGE_LEVEL_SET_SYNC,
};
/**
* struct scmi_voltage_info - describe one available SCMI Voltage Domain
*
......@@ -503,7 +512,8 @@ struct scmi_reset_proto_ops {
* supported voltage level
* @negative_volts_allowed: True if any of the entries of @levels_uv represent
* a negative voltage.
* @attributes: represents Voltage Domain advertised attributes
* @async_level_set: True when the voltage domain supports asynchronous level
* set commands.
* @name: name assigned to the Voltage Domain by platform
* @num_levels: number of total entries in @levels_uv.
* @levels_uv: array of entries describing the available voltage levels for
......@@ -513,7 +523,7 @@ struct scmi_voltage_info {
unsigned int id;
bool segmented;
bool negative_volts_allowed;
unsigned int attributes;
bool async_level_set;
char name[SCMI_MAX_STR_SIZE];
unsigned int num_levels;
#define SCMI_VOLTAGE_SEGMENT_LOW 0
......@@ -544,7 +554,7 @@ struct scmi_voltage_proto_ops {
int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
u32 *config);
int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
u32 flags, s32 volt_uV);
enum scmi_voltage_level_mode mode, s32 volt_uV);
int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
s32 *volt_uV);
};
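
A consumer of these ops, such as a regulator driver layered on top of them,
now selects the completion behaviour explicitly instead of passing raw
protocol flags. A sketch, where voltage_ops, ph and domain_id are placeholders:

	/* Use asynchronous completion where the domain supports it */
	ret = voltage_ops->level_set(ph, domain_id,
				     SCMI_VOLTAGE_LEVEL_SET_AUTO, 900000);

	/* Force a synchronous VOLTAGE_LEVEL_SET even if async is supported */
	ret = voltage_ops->level_set(ph, domain_id,
				     SCMI_VOLTAGE_LEVEL_SET_SYNC, 900000);
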
......@@ -742,6 +752,8 @@ void scmi_protocol_unregister(const struct scmi_protocol *proto);
/* SCMI Notification API - Custom Event Reports */
enum scmi_notification_events {
SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
SCMI_EVENT_CLOCK_RATE_CHANGED = 0x0,
SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 0x1,
SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
......@@ -758,6 +770,13 @@ struct scmi_power_state_changed_report {
unsigned int power_state;
};
struct scmi_clock_rate_notif_report {
ktime_t timestamp;
unsigned int agent_id;
unsigned int clock_id;
unsigned long long rate;
};
struct scmi_system_power_state_notifier_report {
ktime_t timestamp;
unsigned int agent_id;
......
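
The new clock rate events above are delivered through the existing SCMI
notification chain. A hedged sketch of a consumer-side handler; the
registration call is assumed to follow the existing notify_ops interface, and
example_probe()/clk_rate_nb are placeholders:

	static int clk_rate_changed_cb(struct notifier_block *nb,
				       unsigned long event, void *data)
	{
		const struct scmi_clock_rate_notif_report *r = data;

		pr_info("SCMI clock %u rate now %llu Hz (agent %u)\n",
			r->clock_id, r->rate, r->agent_id);

		return NOTIFY_OK;
	}

	static struct notifier_block clk_rate_nb = {
		.notifier_call = clk_rate_changed_cb,
	};

	/* e.g. from an scmi_driver probe, where sdev is the struct scmi_device */
	static int example_probe(struct scmi_device *sdev)
	{
		return sdev->handle->notify_ops->devm_event_notifier_register(sdev,
				SCMI_PROTOCOL_CLOCK,
				SCMI_EVENT_CLOCK_RATE_CHANGED,
				NULL, &clk_rate_nb);
	}
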