Commit 88850f7c authored by Greg Kroah-Hartman

Merge tag 'coresight-next-v6.12' of...

Merge tag 'coresight-next-v6.12' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/coresight/linux into char-misc-next

Suzuki writes:

coresight: updates for Linux v6.12

CoreSight/hwtracing subsystem updates targeting Linux v6.12:
 - Miscellaneous fixes and cleanups
 - TraceID allocation per sink, allowing systems with > 110 cores to be
   traced with perf.
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

* tag 'coresight-next-v6.12' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/coresight/linux:
  coresight: Make trace ID map spinlock local to the map
  coresight: Emit sink ID in the HW_ID packets
  coresight: Remove pending trace ID release mechanism
  coresight: Use per-sink trace ID maps for Perf sessions
  coresight: Make CPU id map a property of a trace ID map
  coresight: Expose map arguments in trace ID API
  coresight: Move struct coresight_trace_id_map to common header
  coresight: Clarify comments around the PID of the sink owner
  coresight: Remove unused ETM Perf stubs
  coresight: tmc: sg: Do not leak sg_table
  Coresight: Set correct cs_mode for dummy source to fix disable issue
  Coresight: Set correct cs_mode for TPDM to fix disable issue
  coresight: cti: use device_* to iterate over device child nodes
parents f53835f1 988d40a4
...@@ -487,23 +487,25 @@ struct coresight_device *coresight_get_sink(struct list_head *path) ...@@ -487,23 +487,25 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev; return csdev;
} }
u32 coresight_get_sink_id(struct coresight_device *csdev)
{
if (!csdev->ea)
return 0;
/*
* See function etm_perf_add_symlink_sink() to know where
* this comes from.
*/
return (u32) (unsigned long) csdev->ea->var;
}
static int coresight_sink_by_id(struct device *dev, const void *data) static int coresight_sink_by_id(struct device *dev, const void *data)
{ {
struct coresight_device *csdev = to_coresight_device(dev); struct coresight_device *csdev = to_coresight_device(dev);
unsigned long hash;
if (csdev->type == CORESIGHT_DEV_TYPE_SINK || if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
if (coresight_get_sink_id(csdev) == *(u32 *)data)
if (!csdev->ea)
return 0;
/*
* See function etm_perf_add_symlink_sink() to know where
* this comes from.
*/
hash = (unsigned long)csdev->ea->var;
if ((u32)hash == *(u32 *)data)
return 1; return 1;
} }
...@@ -902,6 +904,7 @@ static void coresight_device_release(struct device *dev) ...@@ -902,6 +904,7 @@ static void coresight_device_release(struct device *dev)
struct coresight_device *csdev = to_coresight_device(dev); struct coresight_device *csdev = to_coresight_device(dev);
fwnode_handle_put(csdev->dev.fwnode); fwnode_handle_put(csdev->dev.fwnode);
free_percpu(csdev->perf_sink_id_map.cpu_map);
kfree(csdev); kfree(csdev);
} }
...@@ -1159,6 +1162,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) ...@@ -1159,6 +1162,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev)); csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name); dev_set_name(&csdev->dev, "%s", desc->name);
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
spin_lock_init(&csdev->perf_sink_id_map.lock);
csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
if (!csdev->perf_sink_id_map.cpu_map) {
kfree(csdev);
ret = -ENOMEM;
goto err_out;
}
}
/* /*
* Make sure the device registration and the connection fixup * Make sure the device registration and the connection fixup
* are synchronised, so that we don't see uninitialised devices * are synchronised, so that we don't see uninitialised devices
......
...@@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev, ...@@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev,
struct cti_drvdata *drvdata) struct cti_drvdata *drvdata)
{ {
int rc = 0; int rc = 0;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct fwnode_handle *child = NULL;
if (IS_ERR_OR_NULL(fwnode)) if (IS_ERR_OR_NULL(dev_fwnode(dev)))
return -EINVAL; return -EINVAL;
fwnode_for_each_child_node(fwnode, child) { device_for_each_child_node_scoped(dev, child) {
if (cti_plat_node_name_eq(child, CTI_DT_CONNS)) if (cti_plat_node_name_eq(child, CTI_DT_CONNS))
rc = cti_plat_create_connection(dev, drvdata, rc = cti_plat_create_connection(dev, drvdata, child);
child);
if (rc != 0) if (rc != 0)
break; break;
} }
fwnode_handle_put(child);
return rc; return rc;
} }
......
...@@ -21,8 +21,12 @@ DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source"); ...@@ -21,8 +21,12 @@ DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink"); DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
static int dummy_source_enable(struct coresight_device *csdev, static int dummy_source_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode) struct perf_event *event, enum cs_mode mode,
__maybe_unused struct coresight_trace_id_map *id_map)
{ {
if (!coresight_take_mode(csdev, mode))
return -EBUSY;
dev_dbg(csdev->dev.parent, "Dummy source enabled\n"); dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0; return 0;
...@@ -31,6 +35,7 @@ static int dummy_source_enable(struct coresight_device *csdev, ...@@ -31,6 +35,7 @@ static int dummy_source_enable(struct coresight_device *csdev,
static void dummy_source_disable(struct coresight_device *csdev, static void dummy_source_disable(struct coresight_device *csdev,
struct perf_event *event) struct perf_event *event)
{ {
coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(csdev->dev.parent, "Dummy source disabled\n"); dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
} }
......
...@@ -229,15 +229,24 @@ static void free_event_data(struct work_struct *work) ...@@ -229,15 +229,24 @@ static void free_event_data(struct work_struct *work)
struct list_head **ppath; struct list_head **ppath;
ppath = etm_event_cpu_path_ptr(event_data, cpu); ppath = etm_event_cpu_path_ptr(event_data, cpu);
if (!(IS_ERR_OR_NULL(*ppath))) if (!(IS_ERR_OR_NULL(*ppath))) {
struct coresight_device *sink = coresight_get_sink(*ppath);
/*
* Mark perf event as done for trace id allocator, but don't call
* coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
* never free trace IDs to ensure that the ID associated with a CPU
* cannot change during their and other's concurrent sessions. Instead,
* a refcount is used so that the last event to call
* coresight_trace_id_perf_stop() frees all IDs.
*/
coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
coresight_release_path(*ppath); coresight_release_path(*ppath);
}
*ppath = NULL; *ppath = NULL;
coresight_trace_id_put_cpu_id(cpu);
} }
/* mark perf event as done for trace id allocator */
coresight_trace_id_perf_stop();
free_percpu(event_data->path); free_percpu(event_data->path);
kfree(event_data); kfree(event_data);
} }
...@@ -325,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, ...@@ -325,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = user_sink = coresight_get_sink_by_id(id); sink = user_sink = coresight_get_sink_by_id(id);
} }
/* tell the trace ID allocator that a perf event is starting up */
coresight_trace_id_perf_start();
/* check if user wants a coresight configuration selected */ /* check if user wants a coresight configuration selected */
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32); cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
if (cfg_hash) { if (cfg_hash) {
...@@ -401,13 +407,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, ...@@ -401,13 +407,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
} }
/* ensure we can allocate a trace ID for this CPU */ /* ensure we can allocate a trace ID for this CPU */
trace_id = coresight_trace_id_get_cpu_id(cpu); trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) { if (!IS_VALID_CS_TRACE_ID(trace_id)) {
cpumask_clear_cpu(cpu, mask); cpumask_clear_cpu(cpu, mask);
coresight_release_path(path); coresight_release_path(path);
continue; continue;
} }
coresight_trace_id_perf_start(&sink->perf_sink_id_map);
*etm_event_cpu_path_ptr(event_data, cpu) = path; *etm_event_cpu_path_ptr(event_data, cpu) = path;
} }
...@@ -453,6 +460,7 @@ static void etm_event_start(struct perf_event *event, int flags) ...@@ -453,6 +460,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path; struct list_head *path;
u64 hw_id; u64 hw_id;
u8 trace_id;
if (!csdev) if (!csdev)
goto fail; goto fail;
...@@ -495,7 +503,8 @@ static void etm_event_start(struct perf_event *event, int flags) ...@@ -495,7 +503,8 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop; goto fail_end_stop;
/* Finally enable the tracer */ /* Finally enable the tracer */
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
&sink->perf_sink_id_map))
goto fail_disable_path; goto fail_disable_path;
/* /*
...@@ -504,10 +513,16 @@ static void etm_event_start(struct perf_event *event, int flags) ...@@ -504,10 +513,16 @@ static void etm_event_start(struct perf_event *event, int flags)
*/ */
if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) { if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
cpumask_set_cpu(cpu, &event_data->aux_hwid_done); cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
CS_AUX_HW_ID_CURR_VERSION); trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
coresight_trace_id_read_cpu_id(cpu)); hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
CS_AUX_HW_ID_MAJOR_VERSION);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
CS_AUX_HW_ID_MINOR_VERSION);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));
perf_report_aux_output_id(event, hw_id); perf_report_aux_output_id(event, hw_id);
} }
......
...@@ -62,7 +62,6 @@ struct etm_event_data { ...@@ -62,7 +62,6 @@ struct etm_event_data {
struct list_head * __percpu *path; struct list_head * __percpu *path;
}; };
#if IS_ENABLED(CONFIG_CORESIGHT)
int etm_perf_symlink(struct coresight_device *csdev, bool link); int etm_perf_symlink(struct coresight_device *csdev, bool link);
int etm_perf_add_symlink_sink(struct coresight_device *csdev); int etm_perf_add_symlink_sink(struct coresight_device *csdev);
void etm_perf_del_symlink_sink(struct coresight_device *csdev); void etm_perf_del_symlink_sink(struct coresight_device *csdev);
...@@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle) ...@@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
int etm_perf_add_symlink_cscfg(struct device *dev, int etm_perf_add_symlink_cscfg(struct device *dev,
struct cscfg_config_desc *config_desc); struct cscfg_config_desc *config_desc);
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc); void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc);
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{ return -EINVAL; }
void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
return NULL;
}
int etm_perf_add_symlink_cscfg(struct device *dev,
struct cscfg_config_desc *config_desc)
{ return -EINVAL; }
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {}
#endif /* CONFIG_CORESIGHT */
int __init etm_perf_init(void); int __init etm_perf_init(void);
void etm_perf_exit(void); void etm_perf_exit(void);
......
...@@ -481,7 +481,8 @@ void etm_release_trace_id(struct etm_drvdata *drvdata) ...@@ -481,7 +481,8 @@ void etm_release_trace_id(struct etm_drvdata *drvdata)
} }
static int etm_enable_perf(struct coresight_device *csdev, static int etm_enable_perf(struct coresight_device *csdev,
struct perf_event *event) struct perf_event *event,
struct coresight_trace_id_map *id_map)
{ {
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id; int trace_id;
...@@ -500,7 +501,7 @@ static int etm_enable_perf(struct coresight_device *csdev, ...@@ -500,7 +501,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down * with perf locks - we know the ID cannot change until perf shuts down
* the session * the session
*/ */
trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) { if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu); dev_name(&drvdata->csdev->dev), drvdata->cpu);
...@@ -553,7 +554,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev) ...@@ -553,7 +554,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
} }
static int etm_enable(struct coresight_device *csdev, struct perf_event *event, static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode) enum cs_mode mode, struct coresight_trace_id_map *id_map)
{ {
int ret; int ret;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
...@@ -568,7 +569,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event, ...@@ -568,7 +569,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm_enable_sysfs(csdev); ret = etm_enable_sysfs(csdev);
break; break;
case CS_MODE_PERF: case CS_MODE_PERF:
ret = etm_enable_perf(csdev, event); ret = etm_enable_perf(csdev, event, id_map);
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
......
...@@ -752,7 +752,8 @@ static int etm4_parse_event_config(struct coresight_device *csdev, ...@@ -752,7 +752,8 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
} }
static int etm4_enable_perf(struct coresight_device *csdev, static int etm4_enable_perf(struct coresight_device *csdev,
struct perf_event *event) struct perf_event *event,
struct coresight_trace_id_map *id_map)
{ {
int ret = 0, trace_id; int ret = 0, trace_id;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
...@@ -775,7 +776,7 @@ static int etm4_enable_perf(struct coresight_device *csdev, ...@@ -775,7 +776,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down * with perf locks - we know the ID cannot change until perf shuts down
* the session * the session
*/ */
trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) { if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu); dev_name(&drvdata->csdev->dev), drvdata->cpu);
...@@ -837,7 +838,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev) ...@@ -837,7 +838,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
} }
static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode) enum cs_mode mode, struct coresight_trace_id_map *id_map)
{ {
int ret; int ret;
...@@ -851,7 +852,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, ...@@ -851,7 +852,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm4_enable_sysfs(csdev); ret = etm4_enable_sysfs(csdev);
break; break;
case CS_MODE_PERF: case CS_MODE_PERF:
ret = etm4_enable_perf(csdev, event); ret = etm4_enable_perf(csdev, event, id_map);
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
......
...@@ -148,6 +148,7 @@ int coresight_make_links(struct coresight_device *orig, ...@@ -148,6 +148,7 @@ int coresight_make_links(struct coresight_device *orig,
struct coresight_device *target); struct coresight_device *target);
void coresight_remove_links(struct coresight_device *orig, void coresight_remove_links(struct coresight_device *orig,
struct coresight_connection *conn); struct coresight_connection *conn);
u32 coresight_get_sink_id(struct coresight_device *csdev);
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X) #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
extern int etm_readl_cp14(u32 off, unsigned int *val); extern int etm_readl_cp14(u32 off, unsigned int *val);
......
...@@ -194,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata) ...@@ -194,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
} }
static int stm_enable(struct coresight_device *csdev, struct perf_event *event, static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode) enum cs_mode mode,
__maybe_unused struct coresight_trace_id_map *trace_id)
{ {
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include "coresight-priv.h" #include "coresight-priv.h"
#include "coresight-trace-id.h"
/* /*
* Use IDR to map the hash of the source's device name * Use IDR to map the hash of the source's device name
...@@ -63,7 +64,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev, ...@@ -63,7 +64,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev,
*/ */
lockdep_assert_held(&coresight_mutex); lockdep_assert_held(&coresight_mutex);
if (coresight_get_mode(csdev) != CS_MODE_SYSFS) { if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
ret = source_ops(csdev)->enable(csdev, data, mode); ret = source_ops(csdev)->enable(csdev, data, mode, NULL);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -36,7 +36,8 @@ struct etr_buf_hw { ...@@ -36,7 +36,8 @@ struct etr_buf_hw {
* etr_perf_buffer - Perf buffer used for ETR * etr_perf_buffer - Perf buffer used for ETR
* @drvdata - The ETR drvdaga this buffer has been allocated for. * @drvdata - The ETR drvdaga this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR * @etr_buf - Actual buffer used by the ETR
* @pid - The PID this etr_perf_buffer belongs to. * @pid - The PID of the session owner that etr_perf_buffer
* belongs to.
* @snaphost - Perf session mode * @snaphost - Perf session mode
* @nr_pages - Number of pages in the ring buffer. * @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer. * @pages - Array of Pages in the ring buffer.
...@@ -261,6 +262,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table) ...@@ -261,6 +262,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{ {
tmc_free_table_pages(sg_table); tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table); tmc_free_data_pages(sg_table);
kfree(sg_table);
} }
EXPORT_SYMBOL_GPL(tmc_free_sg_table); EXPORT_SYMBOL_GPL(tmc_free_sg_table);
...@@ -342,7 +344,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, ...@@ -342,7 +344,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
rc = tmc_alloc_table_pages(sg_table); rc = tmc_alloc_table_pages(sg_table);
if (rc) { if (rc) {
tmc_free_sg_table(sg_table); tmc_free_sg_table(sg_table);
kfree(sg_table);
return ERR_PTR(rc); return ERR_PTR(rc);
} }
...@@ -1662,7 +1663,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) ...@@ -1662,7 +1663,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out; goto unlock_out;
} }
/* Get a handle on the pid of the process to monitor */ /* Get a handle on the pid of the session owner */
pid = etr_perf->pid; pid = etr_perf->pid;
/* Do not proceed if this device is associated with another session */ /* Do not proceed if this device is associated with another session */
......
...@@ -171,8 +171,9 @@ struct etr_buf { ...@@ -171,8 +171,9 @@ struct etr_buf {
* @csdev: component vitals needed by the framework. * @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry. * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls. * @spinlock: only one at a time pls.
* @pid: Process ID of the process being monitored by the session * @pid: Process ID of the process that owns the session that is using
* that is using this component. * this component. For example this would be the pid of the Perf
* process.
* @buf: Snapshot of the trace data for ETF/ETB. * @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR * @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB. * @len: size of the available trace for ETF/ETB.
......
...@@ -439,7 +439,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata) ...@@ -439,7 +439,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
} }
static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode) enum cs_mode mode,
__maybe_unused struct coresight_trace_id_map *id_map)
{ {
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
...@@ -449,6 +450,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, ...@@ -449,6 +450,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
return -EBUSY; return -EBUSY;
} }
if (!coresight_take_mode(csdev, mode)) {
spin_unlock(&drvdata->spinlock);
return -EBUSY;
}
__tpdm_enable(drvdata); __tpdm_enable(drvdata);
drvdata->enable = true; drvdata->enable = true;
spin_unlock(&drvdata->spinlock); spin_unlock(&drvdata->spinlock);
...@@ -506,6 +512,7 @@ static void tpdm_disable(struct coresight_device *csdev, ...@@ -506,6 +512,7 @@ static void tpdm_disable(struct coresight_device *csdev,
} }
__tpdm_disable(drvdata); __tpdm_disable(drvdata);
coresight_set_mode(csdev, CS_MODE_DISABLED);
drvdata->enable = false; drvdata->enable = false;
spin_unlock(&drvdata->spinlock); spin_unlock(&drvdata->spinlock);
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
* Copyright (c) 2022, Linaro Limited, All rights reserved. * Copyright (c) 2022, Linaro Limited, All rights reserved.
* Author: Mike Leach <mike.leach@linaro.org> * Author: Mike Leach <mike.leach@linaro.org>
*/ */
#include <linux/coresight.h>
#include <linux/coresight-pmu.h> #include <linux/coresight-pmu.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -11,18 +12,12 @@ ...@@ -11,18 +12,12 @@
#include "coresight-trace-id.h" #include "coresight-trace-id.h"
/* Default trace ID map. Used on systems that don't require per sink mappings */ /* Default trace ID map. Used in sysfs mode and for system sources */
static struct coresight_trace_id_map id_map_default; static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
/* maintain a record of the mapping of IDs and pending releases per cpu */ .cpu_map = &id_map_default_cpu_ids,
static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0); .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
static cpumask_t cpu_id_release_pending; };
/* perf session active counter */
static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
/* lock to protect id_map and cpu data */
static DEFINE_SPINLOCK(id_map_lock);
/* #define TRACE_ID_DEBUG 1 */ /* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST) #if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)
...@@ -32,7 +27,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map, ...@@ -32,7 +27,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
{ {
pr_debug("%s id_map::\n", func_name); pr_debug("%s id_map::\n", func_name);
pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids); pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
} }
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__) #define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id) #define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
...@@ -46,9 +40,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map, ...@@ -46,9 +40,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
#endif #endif
/* unlocked read of current trace ID value for given CPU */ /* unlocked read of current trace ID value for given CPU */
static int _coresight_trace_id_read_cpu_id(int cpu) static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{ {
return atomic_read(&per_cpu(cpu_id, cpu)); return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
} }
/* look for next available odd ID, return 0 if none found */ /* look for next available odd ID, return 0 if none found */
...@@ -119,49 +113,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma ...@@ -119,49 +113,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma
clear_bit(id, id_map->used_ids); clear_bit(id, id_map->used_ids);
} }
static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
{
if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
return;
set_bit(id, id_map->pend_rel_ids);
}
/* /*
* release all pending IDs for all current maps & clear CPU associations * Release all IDs and clear CPU associations.
*
* This currently operates on the default id map, but may be extended to
* operate on all registered id maps if per sink id maps are used.
*/ */
static void coresight_trace_id_release_all_pending(void) static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{ {
struct coresight_trace_id_map *id_map = &id_map_default;
unsigned long flags; unsigned long flags;
int cpu, bit; int cpu;
spin_lock_irqsave(&id_map_lock, flags); spin_lock_irqsave(&id_map->lock, flags);
for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) { bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
clear_bit(bit, id_map->used_ids); for_each_possible_cpu(cpu)
clear_bit(bit, id_map->pend_rel_ids); atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
} spin_unlock_irqrestore(&id_map->lock, flags);
for_each_cpu(cpu, &cpu_id_release_pending) {
atomic_set(&per_cpu(cpu_id, cpu), 0);
cpumask_clear_cpu(cpu, &cpu_id_release_pending);
}
spin_unlock_irqrestore(&id_map_lock, flags);
DUMP_ID_MAP(id_map); DUMP_ID_MAP(id_map);
} }
static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{ {
unsigned long flags; unsigned long flags;
int id; int id;
spin_lock_irqsave(&id_map_lock, flags); spin_lock_irqsave(&id_map->lock, flags);
/* check for existing allocation for this CPU */ /* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu); id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (id) if (id)
goto get_cpu_id_clr_pend; goto get_cpu_id_out_unlock;
/* /*
* Find a new ID. * Find a new ID.
...@@ -180,44 +158,32 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_ ...@@ -180,44 +158,32 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
goto get_cpu_id_out_unlock; goto get_cpu_id_out_unlock;
/* allocate the new id to the cpu */ /* allocate the new id to the cpu */
atomic_set(&per_cpu(cpu_id, cpu), id); atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_clr_pend:
/* we are (re)using this ID - so ensure it is not marked for release */
cpumask_clear_cpu(cpu, &cpu_id_release_pending);
clear_bit(id, id_map->pend_rel_ids);
get_cpu_id_out_unlock: get_cpu_id_out_unlock:
spin_unlock_irqrestore(&id_map_lock, flags); spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id); DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map); DUMP_ID_MAP(id_map);
return id; return id;
} }
static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{ {
unsigned long flags; unsigned long flags;
int id; int id;
/* check for existing allocation for this CPU */ /* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu); id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (!id) if (!id)
return; return;
spin_lock_irqsave(&id_map_lock, flags); spin_lock_irqsave(&id_map->lock, flags);
if (atomic_read(&perf_cs_etm_session_active)) { coresight_trace_id_free(id, id_map);
/* set release at pending if perf still active */ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
coresight_trace_id_set_pend_rel(id, id_map);
cpumask_set_cpu(cpu, &cpu_id_release_pending);
} else {
/* otherwise clear id */
coresight_trace_id_free(id, id_map);
atomic_set(&per_cpu(cpu_id, cpu), 0);
}
spin_unlock_irqrestore(&id_map_lock, flags); spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id); DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map); DUMP_ID_MAP(id_map);
} }
...@@ -227,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i ...@@ -227,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
unsigned long flags; unsigned long flags;
int id; int id;
spin_lock_irqsave(&id_map_lock, flags); spin_lock_irqsave(&id_map->lock, flags);
/* prefer odd IDs for system components to avoid legacy CPU IDS */ /* prefer odd IDs for system components to avoid legacy CPU IDS */
id = coresight_trace_id_alloc_new_id(id_map, 0, true); id = coresight_trace_id_alloc_new_id(id_map, 0, true);
spin_unlock_irqrestore(&id_map_lock, flags); spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id); DUMP_ID(id);
DUMP_ID_MAP(id_map); DUMP_ID_MAP(id_map);
...@@ -241,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map * ...@@ -241,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&id_map_lock, flags); spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map); coresight_trace_id_free(id, id_map);
spin_unlock_irqrestore(&id_map_lock, flags); spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id); DUMP_ID(id);
DUMP_ID_MAP(id_map); DUMP_ID_MAP(id_map);
...@@ -253,22 +219,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map * ...@@ -253,22 +219,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
int coresight_trace_id_get_cpu_id(int cpu) int coresight_trace_id_get_cpu_id(int cpu)
{ {
return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default); return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
} }
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id); EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
return _coresight_trace_id_get_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);
void coresight_trace_id_put_cpu_id(int cpu) void coresight_trace_id_put_cpu_id(int cpu)
{ {
coresight_trace_id_map_put_cpu_id(cpu, &id_map_default); _coresight_trace_id_put_cpu_id(cpu, &id_map_default);
} }
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id); EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
_coresight_trace_id_put_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);
int coresight_trace_id_read_cpu_id(int cpu) int coresight_trace_id_read_cpu_id(int cpu)
{ {
return _coresight_trace_id_read_cpu_id(cpu); return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
} }
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id); EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);
int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
return _coresight_trace_id_read_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
int coresight_trace_id_get_system_id(void) int coresight_trace_id_get_system_id(void)
{ {
return coresight_trace_id_map_get_system_id(&id_map_default); return coresight_trace_id_map_get_system_id(&id_map_default);
...@@ -281,17 +265,17 @@ void coresight_trace_id_put_system_id(int id) ...@@ -281,17 +265,17 @@ void coresight_trace_id_put_system_id(int id)
} }
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id); EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
void coresight_trace_id_perf_start(void) void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{ {
atomic_inc(&perf_cs_etm_session_active); atomic_inc(&id_map->perf_cs_etm_session_active);
PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
} }
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start); EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
void coresight_trace_id_perf_stop(void) void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{ {
if (!atomic_dec_return(&perf_cs_etm_session_active)) if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
coresight_trace_id_release_all_pending(); coresight_trace_id_release_all(id_map);
PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
} }
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop); EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
...@@ -17,9 +17,10 @@ ...@@ -17,9 +17,10 @@
* released when done. * released when done.
* *
* In order to ensure that a consistent cpu / ID matching is maintained * In order to ensure that a consistent cpu / ID matching is maintained
* throughout a perf cs_etm event session - a session in progress flag will * throughout a perf cs_etm event session - a session in progress flag will be
* be maintained, and released IDs not cleared until the perf session is * maintained for each sink, and IDs are cleared when all the perf sessions
* complete. This allows the same CPU to be re-allocated its prior ID. * complete. This allows the same CPU to be re-allocated its prior ID when
* events are scheduled in and out.
* *
* *
* Trace ID maps will be created and initialised to prevent architecturally * Trace ID maps will be created and initialised to prevent architecturally
...@@ -32,10 +33,6 @@ ...@@ -32,10 +33,6 @@
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/types.h> #include <linux/types.h>
/* architecturally we have 128 IDs some of which are reserved */
#define CORESIGHT_TRACE_IDS_MAX 128
/* ID 0 is reserved */ /* ID 0 is reserved */
#define CORESIGHT_TRACE_ID_RES_0 0 #define CORESIGHT_TRACE_ID_RES_0 0
...@@ -46,23 +43,6 @@ ...@@ -46,23 +43,6 @@
#define IS_VALID_CS_TRACE_ID(id) \ #define IS_VALID_CS_TRACE_ID(id) \
((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP)) ((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP))
/**
* Trace ID map.
*
* @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
* Initialised so that the reserved IDs are permanently marked as
* in use.
* @pend_rel_ids: CPU IDs that have been released by the trace source but not
* yet marked as available, to allow re-allocation to the same
* CPU during a perf session.
*/
struct coresight_trace_id_map {
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
};
/* Allocate and release IDs for a single default trace ID map */
/** /**
* Read and optionally allocate a CoreSight trace ID and associate with a CPU. * Read and optionally allocate a CoreSight trace ID and associate with a CPU.
* *
...@@ -78,19 +58,27 @@ struct coresight_trace_id_map { ...@@ -78,19 +58,27 @@ struct coresight_trace_id_map {
*/ */
int coresight_trace_id_get_cpu_id(int cpu); int coresight_trace_id_get_cpu_id(int cpu);
/**
* Version of coresight_trace_id_get_cpu_id() that allows the ID map to operate
* on to be provided.
*/
int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
/** /**
* Release an allocated trace ID associated with the CPU. * Release an allocated trace ID associated with the CPU.
* *
* This will release the CoreSight trace ID associated with the CPU, * This will release the CoreSight trace ID associated with the CPU.
* unless a perf session is in operation.
*
* If a perf session is in operation then the ID will be marked as pending
* release.
* *
* @cpu: The CPU index to release the associated trace ID. * @cpu: The CPU index to release the associated trace ID.
*/ */
void coresight_trace_id_put_cpu_id(int cpu); void coresight_trace_id_put_cpu_id(int cpu);
/**
* Version of coresight_trace_id_put_cpu_id() that allows the ID map to operate
* on to be provided.
*/
void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
/** /**
* Read the current allocated CoreSight Trace ID value for the CPU. * Read the current allocated CoreSight Trace ID value for the CPU.
* *
...@@ -111,6 +99,12 @@ void coresight_trace_id_put_cpu_id(int cpu); ...@@ -111,6 +99,12 @@ void coresight_trace_id_put_cpu_id(int cpu);
*/ */
int coresight_trace_id_read_cpu_id(int cpu); int coresight_trace_id_read_cpu_id(int cpu);
/**
* Version of coresight_trace_id_read_cpu_id() that allows the ID map to operate
* on to be provided.
*/
int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
/** /**
* Allocate a CoreSight trace ID for a system component. * Allocate a CoreSight trace ID for a system component.
* *
...@@ -136,21 +130,21 @@ void coresight_trace_id_put_system_id(int id); ...@@ -136,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
/** /**
* Notify the Trace ID allocator that a perf session is starting. * Notify the Trace ID allocator that a perf session is starting.
* *
* Increase the perf session reference count - called by perf when setting up * Increase the perf session reference count - called by perf when setting up a
* a trace event. * trace event.
* *
* This reference count is used by the ID allocator to ensure that trace IDs * Perf sessions never free trace IDs to ensure that the ID associated with a
* associated with a CPU cannot change or be released during a perf session. * CPU cannot change during their and other's concurrent sessions. Instead,
* this refcount is used so that the last event to finish always frees all IDs.
*/ */
void coresight_trace_id_perf_start(void); void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
/** /**
* Notify the ID allocator that a perf session is stopping. * Notify the ID allocator that a perf session is stopping.
* *
* Decrease the perf session reference count. * Decrease the perf session reference count. If this causes the count to go to
* if this causes the count to go to zero, then all Trace IDs marked as pending * zero, then all Trace IDs will be released.
* release, will be released.
*/ */
void coresight_trace_id_perf_stop(void); void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
#endif /* _CORESIGHT_TRACE_ID_H */ #endif /* _CORESIGHT_TRACE_ID_H */
...@@ -49,12 +49,21 @@ ...@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload. * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID. * Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file. * [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
* [59:08] - Unused (SBZ) * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
* [63:60] - Version * Added in minor version 1.
* [55:40] - Unused (SBZ)
* [59:56] - Minor Version - previously existing fields are compatible with
* all minor versions.
* [63:60] - Major Version - previously existing fields mean different things
* in new major versions.
*/ */
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0) #define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60) #define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
#define CS_AUX_HW_ID_CURR_VERSION 0 #define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
#define CS_AUX_HW_ID_MAJOR_VERSION 0
#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif #endif
...@@ -218,6 +218,24 @@ struct coresight_sysfs_link { ...@@ -218,6 +218,24 @@ struct coresight_sysfs_link {
const char *target_name; const char *target_name;
}; };
/* architecturally we have 128 IDs some of which are reserved */
#define CORESIGHT_TRACE_IDS_MAX 128
/**
* Trace ID map.
*
* @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
* Initialised so that the reserved IDs are permanently marked as
* in use.
* @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
*/
struct coresight_trace_id_map {
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
atomic_t __percpu *cpu_map;
atomic_t perf_cs_etm_session_active;
spinlock_t lock;
};
/** /**
* struct coresight_device - representation of a device as used by the framework * struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device. * @pdata: Platform data with device connections associated to this device.
...@@ -271,6 +289,7 @@ struct coresight_device { ...@@ -271,6 +289,7 @@ struct coresight_device {
bool sysfs_sink_activated; bool sysfs_sink_activated;
struct dev_ext_attribute *ea; struct dev_ext_attribute *ea;
struct coresight_device *def_sink; struct coresight_device *def_sink;
struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */ /* sysfs links between components */
int nr_links; int nr_links;
bool has_conns_grp; bool has_conns_grp;
...@@ -365,7 +384,7 @@ struct coresight_ops_link { ...@@ -365,7 +384,7 @@ struct coresight_ops_link {
struct coresight_ops_source { struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev); int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event, int (*enable)(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode); enum cs_mode mode, struct coresight_trace_id_map *id_map);
void (*disable)(struct coresight_device *csdev, void (*disable)(struct coresight_device *csdev,
struct perf_event *event); struct perf_event *event);
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment