// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022, Linaro Limited, All rights reserved.
 * Author: Mike Leach <mike.leach@linaro.org>
 */
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "coresight-trace-id.h"

/* Default trace ID map. Used on systems that don't require per sink mappings */
static struct coresight_trace_id_map id_map_default;

/* maintain a record of the mapping of IDs and pending releases per cpu */
static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
static cpumask_t cpu_id_release_pending;

/* perf session active counter */
static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);

/* lock to protect id_map and cpu data */
static DEFINE_SPINLOCK(id_map_lock);

/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)

static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
					  const char *func_name)
{
	pr_debug("%s id_map::\n", func_name);
	pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
	pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
}
#define DUMP_ID_MAP(map)   coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called;  cpu=%d, id=%d\n", __func__, cpu, id)
#define DUMP_ID(id)   pr_debug("%s called; id=%d\n", __func__, id)
#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
#else
#define DUMP_ID_MAP(map)
#define DUMP_ID(id)
#define DUMP_ID_CPU(cpu, id)
#define PERF_SESSION(n)
#endif

/* unlocked read of current trace ID value for given CPU */
static int _coresight_trace_id_read_cpu_id(int cpu)
{
	return atomic_read(&per_cpu(cpu_id, cpu));
}

/* look for next available odd ID, return 0 if none found */
static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
{
	int found_id = 0, bit = 1, next_id;

	while ((bit < CORESIGHT_TRACE_ID_RES_TOP) && !found_id) {
		/*
		 * bitmap length of CORESIGHT_TRACE_ID_RES_TOP,
		 * search from offset `bit`.
		 */
		next_id = find_next_zero_bit(id_map->used_ids,
					     CORESIGHT_TRACE_ID_RES_TOP, bit);
		if ((next_id < CORESIGHT_TRACE_ID_RES_TOP) && (next_id & 0x1))
			found_id = next_id;
		else
			bit = next_id + 1;
	}
	return found_id;
}

/*
 * Allocate new ID and set in use
 *
 * If @preferred_id is a valid ID then try to use that value if available.
 * If @preferred_id is not valid and @prefer_odd_id is true, try for an odd ID.
 *
 * Otherwise allocate the next available ID.
 */
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
					   int preferred_id, bool prefer_odd_id)
{
	int id = 0;

	/* for backwards compatibility, cpu IDs may use preferred value */
	if (IS_VALID_CS_TRACE_ID(preferred_id) &&
	    !test_bit(preferred_id, id_map->used_ids)) {
		id = preferred_id;
		goto trace_id_allocated;
	} else if (prefer_odd_id) {
		/* may use odd IDs to avoid preferred legacy cpu IDs */
		id = coresight_trace_id_find_odd_id(id_map);
		if (id)
			goto trace_id_allocated;
	}

	/*
	 * skip reserved bit 0, look at bitmap length of
	 * CORESIGHT_TRACE_ID_RES_TOP from offset of bit 1.
	 */
	id = find_next_zero_bit(id_map->used_ids, CORESIGHT_TRACE_ID_RES_TOP, 1);
	if (id >= CORESIGHT_TRACE_ID_RES_TOP)
		return -EINVAL;

	/* mark as used */
trace_id_allocated:
	set_bit(id, id_map->used_ids);
	return id;
}

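/* release an ID back to the map - warns if the ID is invalid or not in use */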
static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
{
	if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
		return;
	if (WARN(!test_bit(id, id_map->used_ids), "Freeing unused ID %d\n", id))
		return;
	clear_bit(id, id_map->used_ids);
}

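/* mark an ID as pending release - it is freed once all perf sessions stop */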
static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
{
	if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
		return;
	set_bit(id, id_map->pend_rel_ids);
}

/*
 * release all pending IDs for all current maps & clear CPU associations
 *
 * This currently operates on the default id map, but may be extended to
 * operate on all registered id maps if per sink id maps are used.
 */
static void coresight_trace_id_release_all_pending(void)
{
	struct coresight_trace_id_map *id_map = &id_map_default;
	unsigned long flags;
	int cpu, bit;

	spin_lock_irqsave(&id_map_lock, flags);
	for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
		clear_bit(bit, id_map->used_ids);
		clear_bit(bit, id_map->pend_rel_ids);
	}
	for_each_cpu(cpu, &cpu_id_release_pending) {
		atomic_set(&per_cpu(cpu_id, cpu), 0);
		cpumask_clear_cpu(cpu, &cpu_id_release_pending);
	}
	spin_unlock_irqrestore(&id_map_lock, flags);
	DUMP_ID_MAP(id_map);
}

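/* allocate a trace ID for a CPU, or return the ID already associated with it */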
static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&id_map_lock, flags);

	/* check for existing allocation for this CPU */
	id = _coresight_trace_id_read_cpu_id(cpu);
	if (id)
		goto get_cpu_id_clr_pend;

	/*
	 * Find a new ID.
	 *
	 * Use legacy values where possible in the dynamic trace ID allocator to
	 * allow older tools to continue working if they are not upgraded at the
	 * same time as the kernel drivers.
	 *
	 * If the generated legacy ID is invalid or not available, then the next
	 * available dynamic ID will be used.
	 */
	id = coresight_trace_id_alloc_new_id(id_map,
					     CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
					     false);
	if (!IS_VALID_CS_TRACE_ID(id))
		goto get_cpu_id_out_unlock;

	/* allocate the new id to the cpu */
	atomic_set(&per_cpu(cpu_id, cpu), id);

get_cpu_id_clr_pend:
	/* we are (re)using this ID - so ensure it is not marked for release */
	cpumask_clear_cpu(cpu, &cpu_id_release_pending);
	clear_bit(id, id_map->pend_rel_ids);

get_cpu_id_out_unlock:
	spin_unlock_irqrestore(&id_map_lock, flags);

	DUMP_ID_CPU(cpu, id);
	DUMP_ID_MAP(id_map);
	return id;
}

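/* release a CPU's trace ID - deferred if a perf session is still active */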
static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	/* check for existing allocation for this CPU */
	id = _coresight_trace_id_read_cpu_id(cpu);
	if (!id)
		return;

	spin_lock_irqsave(&id_map_lock, flags);

	if (atomic_read(&perf_cs_etm_session_active)) {
		/* mark for pending release if perf is still active */
		coresight_trace_id_set_pend_rel(id, id_map);
		cpumask_set_cpu(cpu, &cpu_id_release_pending);
	} else {
		/* otherwise clear id */
		coresight_trace_id_free(id, id_map);
		atomic_set(&per_cpu(cpu_id, cpu), 0);
	}

	spin_unlock_irqrestore(&id_map_lock, flags);
	DUMP_ID_CPU(cpu, id);
	DUMP_ID_MAP(id_map);
}

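/* allocate a trace ID for a system component that is not tied to a CPU */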
static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&id_map_lock, flags);
	/* prefer odd IDs for system components to avoid legacy CPU IDs */
	id = coresight_trace_id_alloc_new_id(id_map, 0, true);
	spin_unlock_irqrestore(&id_map_lock, flags);

	DUMP_ID(id);
	DUMP_ID_MAP(id_map);
	return id;
}

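/* release a trace ID previously allocated to a system component */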
static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *id_map, int id)
{
	unsigned long flags;

	spin_lock_irqsave(&id_map_lock, flags);
	coresight_trace_id_free(id, id_map);
	spin_unlock_irqrestore(&id_map_lock, flags);

	DUMP_ID(id);
	DUMP_ID_MAP(id_map);
}

/* API functions */

int coresight_trace_id_get_cpu_id(int cpu)
{
	return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);

void coresight_trace_id_put_cpu_id(int cpu)
{
	coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);

int coresight_trace_id_read_cpu_id(int cpu)
{
	return _coresight_trace_id_read_cpu_id(cpu);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);

int coresight_trace_id_get_system_id(void)
{
	return coresight_trace_id_map_get_system_id(&id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);

void coresight_trace_id_put_system_id(int id)
{
	coresight_trace_id_map_put_system_id(&id_map_default, id);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);

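/*
 * Perf session tracking.
 *
 * While any perf cs_etm session is active, CPU trace IDs are not freed
 * immediately but marked as pending release; all pending IDs are released
 * when the last session stops.
 */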
void coresight_trace_id_perf_start(void)
{
	atomic_inc(&perf_cs_etm_session_active);
	PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);

void coresight_trace_id_perf_stop(void)
{
	if (!atomic_dec_return(&perf_cs_etm_session_active))
		coresight_trace_id_release_all_pending();
	PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
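
/*
 * Usage sketch (illustrative only, not called from this file):
 *
 * A per-CPU trace source driver would typically do:
 *
 *	int trace_id = coresight_trace_id_get_cpu_id(cpu);
 *
 *	if (!IS_VALID_CS_TRACE_ID(trace_id))
 *		return -EINVAL;
 *	...program the source's trace ID register with trace_id...
 *	coresight_trace_id_put_cpu_id(cpu);
 *
 * Non-CPU system components use coresight_trace_id_get_system_id() /
 * coresight_trace_id_put_system_id() instead. The perf layer brackets each
 * session with coresight_trace_id_perf_start() /
 * coresight_trace_id_perf_stop() so that CPU IDs are held until the last
 * active session stops.
 */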