Commit de0029fd authored by James Clark, committed by Suzuki K Poulose

coresight: Remove pending trace ID release mechanism

Pending the release of IDs was a way of managing concurrent sysfs and
Perf sessions in a single global ID map. Perf may have finished while
sysfs hadn't, and Perf shouldn't release the IDs in use by sysfs and
vice versa.

Now that Perf uses its own exclusive ID maps, pending release doesn't
result in any different behavior than just releasing all IDs when the
last Perf session finishes. As part of the per-sink trace ID change, we
would have still had to make the pending mechanism work on a per-sink
basis, due to the overlapping ID allocations, so instead of making that
more complicated, just remove it.
Signed-off-by: James Clark <james.clark@arm.com>
Reviewed-by: Mike Leach <mike.leach@linaro.org>
Signed-off-by: James Clark <james.clark@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20240722101202.26915-16-james.clark@linaro.org
parent 5ad628a7
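
To make the new scheme concrete, here is a minimal user-space C sketch of the mechanism the message describes. It is illustrative only: struct id_map, perf_start() and friends are hypothetical stand-ins for coresight_trace_id_map and the coresight_trace_id_* API, and the spinlock the kernel holds around map updates is omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

#define NR_IDS  128
#define NR_CPUS 8

/* Hypothetical stand-in for a per-sink coresight_trace_id_map. */
struct id_map {
	bool used_ids[NR_IDS];      /* which trace IDs are allocated */
	int cpu_id[NR_CPUS];        /* ID cached per CPU, 0 = none */
	atomic_int sessions_active; /* perf sessions using this map */
};

/* Drop every ID and CPU association in one go, as
 * coresight_trace_id_release_all() does below. */
static void release_all(struct id_map *map)
{
	memset(map->used_ids, 0, sizeof(map->used_ids));
	memset(map->cpu_id, 0, sizeof(map->cpu_id));
}

/* Called once per perf event path that targets this sink. */
static void perf_start(struct id_map *map)
{
	atomic_fetch_add(&map->sessions_active, 1);
}

/* Perf never frees individual IDs, so a CPU keeps the same ID across
 * overlapping sessions; the last session to stop frees the whole map. */
static void perf_stop(struct id_map *map)
{
	if (atomic_fetch_sub(&map->sessions_active, 1) == 1)
		release_all(map);
}

Because the counter now lives in the per-sink map rather than in a single global, two sinks can run independent perf sessions without one sink's teardown disturbing the other's ID allocations.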
@@ -232,15 +232,21 @@ static void free_event_data(struct work_struct *work)
 		if (!(IS_ERR_OR_NULL(*ppath))) {
 			struct coresight_device *sink = coresight_get_sink(*ppath);
 
-			coresight_trace_id_put_cpu_id_map(cpu, &sink->perf_sink_id_map);
+			/*
+			 * Mark perf event as done for trace id allocator, but don't call
+			 * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
+			 * never free trace IDs to ensure that the ID associated with a CPU
+			 * cannot change during their and other's concurrent sessions. Instead,
+			 * a refcount is used so that the last event to call
+			 * coresight_trace_id_perf_stop() frees all IDs.
+			 */
+			coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
 			coresight_release_path(*ppath);
 		}
 		*ppath = NULL;
 	}
 
-	/* mark perf event as done for trace id allocator */
-	coresight_trace_id_perf_stop();
-
 	free_percpu(event_data->path);
 	kfree(event_data);
 }
@@ -328,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 		sink = user_sink = coresight_get_sink_by_id(id);
 	}
 
-	/* tell the trace ID allocator that a perf event is starting up */
-	coresight_trace_id_perf_start();
-
 	/* check if user wants a coresight configuration selected */
 	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
 	if (cfg_hash) {
@@ -411,6 +414,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 			continue;
 		}
 
+		coresight_trace_id_perf_start(&sink->perf_sink_id_map);
 		*etm_event_cpu_path_ptr(event_data, cpu) = path;
 	}
 
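Continuing the sketch above, a short runnable demonstration of the pairing these two hunks implement — etm_setup_aux() takes the refcount once per event path on the sink's map, and free_event_data() drops it, with the last drop freeing every ID:

#include <assert.h>

int main(void)
{
	struct id_map sink_map = { 0 };

	/* Give CPU 0 an ID, as the allocator would on first use. */
	sink_map.used_ids[1] = true;
	sink_map.cpu_id[0] = 1;

	perf_start(&sink_map);        /* session A: etm_setup_aux() */
	perf_start(&sink_map);        /* session B overlaps on this sink */

	perf_stop(&sink_map);         /* session A: free_event_data() */
	assert(sink_map.used_ids[1]); /* B still active, ID kept */

	perf_stop(&sink_map);         /* last session out */
	assert(!sink_map.used_ids[1] && !sink_map.cpu_id[0]); /* all freed */
	return 0;
}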
...
@@ -18,12 +18,6 @@ static struct coresight_trace_id_map id_map_default = {
 	.cpu_map = &id_map_default_cpu_ids
 };
 
-/* maintain a record of the pending releases per cpu */
-static cpumask_t cpu_id_release_pending;
-
-/* perf session active counter */
-static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
-
 /* lock to protect id_map and cpu data */
 static DEFINE_SPINLOCK(id_map_lock);
 
@@ -35,7 +29,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
 {
 	pr_debug("%s id_map::\n", func_name);
 	pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
-	pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
 }
 #define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
 #define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
@@ -122,34 +115,18 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
 	clear_bit(id, id_map->used_ids);
 }
 
-static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
-{
-	if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
-		return;
-	set_bit(id, id_map->pend_rel_ids);
-}
-
 /*
- * release all pending IDs for all current maps & clear CPU associations
- *
- * This currently operates on the default id map, but may be extended to
- * operate on all registered id maps if per sink id maps are used.
+ * Release all IDs and clear CPU associations.
  */
-static void coresight_trace_id_release_all_pending(void)
+static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
 {
-	struct coresight_trace_id_map *id_map = &id_map_default;
 	unsigned long flags;
-	int cpu, bit;
+	int cpu;
 
 	spin_lock_irqsave(&id_map_lock, flags);
-	for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
-		clear_bit(bit, id_map->used_ids);
-		clear_bit(bit, id_map->pend_rel_ids);
-	}
-	for_each_cpu(cpu, &cpu_id_release_pending) {
-		atomic_set(per_cpu_ptr(id_map_default.cpu_map, cpu), 0);
-		cpumask_clear_cpu(cpu, &cpu_id_release_pending);
-	}
+	bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
+	for_each_possible_cpu(cpu)
+		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
 	spin_unlock_irqrestore(&id_map_lock, flags);
 	DUMP_ID_MAP(id_map);
 }
@@ -164,7 +141,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 	/* check for existing allocation for this CPU */
 	id = _coresight_trace_id_read_cpu_id(cpu, id_map);
 	if (id)
-		goto get_cpu_id_clr_pend;
+		goto get_cpu_id_out_unlock;
 
 	/*
 	 * Find a new ID.
@@ -185,11 +162,6 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 	/* allocate the new id to the cpu */
 	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
 
-get_cpu_id_clr_pend:
-	/* we are (re)using this ID - so ensure it is not marked for release */
-	cpumask_clear_cpu(cpu, &cpu_id_release_pending);
-	clear_bit(id, id_map->pend_rel_ids);
-
 get_cpu_id_out_unlock:
 	spin_unlock_irqrestore(&id_map_lock, flags);
 
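The two hunks above leave the allocator's fast path intact while deleting the pend_rel bookkeeping that used to follow it. In the user-space model, the resulting shape of _coresight_trace_id_get_cpu_id() is roughly this (hypothetical names again, with the lock and the reserved-ID range left out):

/* Sketch of the allocator after the change: reuse an existing per-CPU
 * allocation as-is, otherwise claim the lowest free ID. */
static int get_cpu_id(int cpu, struct id_map *map)
{
	int id = map->cpu_id[cpu];

	/* Fast path: the CPU already owns an ID; no pending-release
	 * bits need clearing any more. */
	if (id)
		return id;

	/* Slow path: claim the lowest free ID (ID 0 stays reserved). */
	for (id = 1; id < NR_IDS; id++) {
		if (!map->used_ids[id]) {
			map->used_ids[id] = true;
			map->cpu_id[cpu] = id;
			return id;
		}
	}
	return -1; /* no IDs left */
}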
@@ -210,15 +182,8 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 
 	spin_lock_irqsave(&id_map_lock, flags);
 
-	if (atomic_read(&perf_cs_etm_session_active)) {
-		/* set release at pending if perf still active */
-		coresight_trace_id_set_pend_rel(id, id_map);
-		cpumask_set_cpu(cpu, &cpu_id_release_pending);
-	} else {
-		/* otherwise clear id */
-		coresight_trace_id_free(id, id_map);
-		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
-	}
+	coresight_trace_id_free(id, id_map);
+	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
 
 	spin_unlock_irqrestore(&id_map_lock, flags);
 	DUMP_ID_CPU(cpu, id);
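For completeness, the put side in the same model is now an unconditional free. The session-active test and the pending-release branch are gone because perf no longer puts individual IDs at all; only sysfs sessions call this, and they free immediately:

/* Sketch of the simplified put path (hypothetical, lock omitted). */
static void put_cpu_id(int cpu, struct id_map *map)
{
	int id = map->cpu_id[cpu];

	if (!id)
		return;
	map->used_ids[id] = false;
	map->cpu_id[cpu] = 0;
}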
@@ -302,17 +267,17 @@ void coresight_trace_id_put_system_id(int id)
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
 
-void coresight_trace_id_perf_start(void)
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
 {
-	atomic_inc(&perf_cs_etm_session_active);
-	PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+	atomic_inc(&id_map->perf_cs_etm_session_active);
+	PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
 
-void coresight_trace_id_perf_stop(void)
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
 {
-	if (!atomic_dec_return(&perf_cs_etm_session_active))
-		coresight_trace_id_release_all_pending();
-	PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+	if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
+		coresight_trace_id_release_all(id_map);
+	PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
@@ -17,9 +17,10 @@
  * released when done.
  *
  * In order to ensure that a consistent cpu / ID matching is maintained
- * throughout a perf cs_etm event session - a session in progress flag will
- * be maintained, and released IDs not cleared until the perf session is
- * complete. This allows the same CPU to be re-allocated its prior ID.
+ * throughout a perf cs_etm event session - a session in progress flag will be
+ * maintained for each sink, and IDs are cleared when all the perf sessions
+ * complete. This allows the same CPU to be re-allocated its prior ID when
+ * events are scheduled in and out.
 *
 *
 * Trace ID maps will be created and initialised to prevent architecturally
@@ -66,11 +67,7 @@ int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
 /**
  * Release an allocated trace ID associated with the CPU.
  *
- * This will release the CoreSight trace ID associated with the CPU,
- * unless a perf session is in operation.
- *
- * If a perf session is in operation then the ID will be marked as pending
- * release.
+ * This will release the CoreSight trace ID associated with the CPU.
  *
  * @cpu: The CPU index to release the associated trace ID.
  */
@@ -133,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
 /**
  * Notify the Trace ID allocator that a perf session is starting.
  *
- * Increase the perf session reference count - called by perf when setting up
- * a trace event.
+ * Increase the perf session reference count - called by perf when setting up a
+ * trace event.
 *
- * This reference count is used by the ID allocator to ensure that trace IDs
- * associated with a CPU cannot change or be released during a perf session.
+ * Perf sessions never free trace IDs to ensure that the ID associated with a
+ * CPU cannot change during their and other's concurrent sessions. Instead,
+ * this refcount is used so that the last event to finish always frees all IDs.
  */
-void coresight_trace_id_perf_start(void);
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
 
 /**
  * Notify the ID allocator that a perf session is stopping.
  *
- * Decrease the perf session reference count.
- * if this causes the count to go to zero, then all Trace IDs marked as pending
- * release, will be released.
+ * Decrease the perf session reference count. If this causes the count to go to
+ * zero, then all Trace IDs will be released.
  */
-void coresight_trace_id_perf_stop(void);
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
 
 #endif /* _CORESIGHT_TRACE_ID_H */
@@ -227,14 +227,12 @@ struct coresight_sysfs_link {
 * @used_ids:	Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
 *		Initialised so that the reserved IDs are permanently marked as
 *		in use.
- * @pend_rel_ids: CPU IDs that have been released by the trace source but not
- *		  yet marked as available, to allow re-allocation to the same
- *		  CPU during a perf session.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
 */
 struct coresight_trace_id_map {
 	DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
-	DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
 	atomic_t __percpu *cpu_map;
+	atomic_t perf_cs_etm_session_active;
 };
 
 /**
...