xref: /openbmc/linux/drivers/hwtracing/coresight/coresight-tmc-etf.c (revision 1830dad34c070161fda2ff1db77b39ffa78aa380)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2016 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"
14 
15 static int tmc_set_etf_buffer(struct coresight_device *csdev,
16 			      struct perf_output_handle *handle);
17 
/*
 * Program and start the TMC in circular-buffer (ETB) mode.
 *
 * All callers in this file invoke this with drvdata->spinlock held; the
 * coresight register lock is dropped (CS_UNLOCK) for the duration of the
 * register writes and re-asserted before returning.
 */
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* Capture into the TMC's internal RAM, treated as a circular buffer */
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/*
	 * FFCR: enable the formatter and trigger insertion, and make both a
	 * flush-in and a trigger event generate a flush; a trigger-in also
	 * raises the trigger event.
	 */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Program the trigger counter before turning capture on */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
36 
/*
 * Enable ETB capture.  Kept as an int-returning wrapper so callers can
 * treat HW enable uniformly; programming the TMC cannot fail here, so
 * this always returns 0.
 */
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_enable_hw(drvdata);
	return 0;
}
42 
43 static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
44 {
45 	char *bufp;
46 	u32 read_data, lost;
47 
48 	/* Check if the buffer wrapped around. */
49 	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
50 	bufp = drvdata->buf;
51 	drvdata->len = 0;
52 	while (1) {
53 		read_data = readl_relaxed(drvdata->base + TMC_RRD);
54 		if (read_data == 0xFFFFFFFF)
55 			break;
56 		memcpy(bufp, &read_data, 4);
57 		bufp += 4;
58 		drvdata->len += 4;
59 	}
60 
61 	if (lost)
62 		coresight_insert_barrier_packet(drvdata->buf);
63 	return;
64 }
65 
/*
 * Flush and stop the TMC, salvaging the captured trace first when the
 * sink is driven from sysFS.  Callers hold drvdata->spinlock.
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
81 
/*
 * Program and start the TMC in hardware-FIFO (ETF/link) mode, where
 * trace flows through to a downstream sink rather than being stored.
 * Callers hold drvdata->spinlock.
 */
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	/* Only formatter and trigger insertion are needed in FIFO mode */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Buffer watermark of zero: drain as soon as data is available */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
97 
/*
 * Enable ETF (FIFO) operation.  Int-returning wrapper for symmetry with
 * the ETB path; programming the TMC cannot fail here, so this always
 * returns 0.
 */
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etf_enable_hw(drvdata);
	return 0;
}
103 
/*
 * Flush and stop the TMC when running as a FIFO/link.  Nothing needs to
 * be salvaged: in FIFO mode the trace has already been pushed downstream.
 * Callers hold drvdata->spinlock.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
113 
114 /*
115  * Return the available trace data in the buffer from @pos, with
116  * a maximum limit of @len, updating the @bufpp on where to
117  * find it.
118  */
119 ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
120 				loff_t pos, size_t len, char **bufpp)
121 {
122 	ssize_t actual = len;
123 
124 	/* Adjust the len to available size @pos */
125 	if (pos + actual > drvdata->len)
126 		actual = drvdata->len - pos;
127 	if (actual > 0)
128 		*bufpp = drvdata->buf + pos;
129 	return actual;
130 }
131 
/*
 * Enable the ETB/ETF as a sysFS-driven sink.
 *
 * Allocates the trace buffer on first use (outside the spinlock) and
 * programs the TMC for circular-buffer capture.  Multiple sysFS writers
 * may share an already-enabled sink.
 *
 * Returns 0 on success, -EBUSY if a buffer read is in progress, -ENOMEM
 * if buffer allocation fails.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;	/* set when 'buf' ownership moves to drvdata */
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/*
		 * Let's try again.  Another thread may have installed a
		 * buffer while the lock was dropped; the drvdata->buf check
		 * below handles that case and 'buf' is freed at 'out'.
		 */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret)
		drvdata->mode = CS_MODE_SYSFS;
	else
		/* Free up the buffer if we failed to enable */
		used = false;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
201 
202 static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
203 {
204 	int ret = 0;
205 	unsigned long flags;
206 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
207 	struct perf_output_handle *handle = data;
208 
209 	spin_lock_irqsave(&drvdata->spinlock, flags);
210 	do {
211 		ret = -EINVAL;
212 		if (drvdata->reading)
213 			break;
214 		/*
215 		 * In Perf mode there can be only one writer per sink.  There
216 		 * is also no need to continue if the ETB/ETF is already
217 		 * operated from sysFS.
218 		 */
219 		if (drvdata->mode != CS_MODE_DISABLED)
220 			break;
221 
222 		ret = tmc_set_etf_buffer(csdev, handle);
223 		if (ret)
224 			break;
225 		ret  = tmc_etb_enable_hw(drvdata);
226 		if (!ret)
227 			drvdata->mode = CS_MODE_PERF;
228 	} while (0);
229 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
230 
231 	return ret;
232 }
233 
234 static int tmc_enable_etf_sink(struct coresight_device *csdev,
235 			       u32 mode, void *data)
236 {
237 	int ret;
238 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
239 
240 	switch (mode) {
241 	case CS_MODE_SYSFS:
242 		ret = tmc_enable_etf_sink_sysfs(csdev);
243 		break;
244 	case CS_MODE_PERF:
245 		ret = tmc_enable_etf_sink_perf(csdev, data);
246 		break;
247 	/* We shouldn't be here */
248 	default:
249 		ret = -EINVAL;
250 		break;
251 	}
252 
253 	if (ret)
254 		return ret;
255 
256 	dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
257 	return 0;
258 }
259 
260 static void tmc_disable_etf_sink(struct coresight_device *csdev)
261 {
262 	unsigned long flags;
263 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
264 
265 	spin_lock_irqsave(&drvdata->spinlock, flags);
266 	if (drvdata->reading) {
267 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
268 		return;
269 	}
270 
271 	/* Disable the TMC only if it needs to */
272 	if (drvdata->mode != CS_MODE_DISABLED) {
273 		tmc_etb_disable_hw(drvdata);
274 		drvdata->mode = CS_MODE_DISABLED;
275 	}
276 
277 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
278 
279 	dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
280 }
281 
282 static int tmc_enable_etf_link(struct coresight_device *csdev,
283 			       int inport, int outport)
284 {
285 	int ret;
286 	unsigned long flags;
287 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
288 
289 	spin_lock_irqsave(&drvdata->spinlock, flags);
290 	if (drvdata->reading) {
291 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
292 		return -EBUSY;
293 	}
294 
295 	ret = tmc_etf_enable_hw(drvdata);
296 	if (!ret)
297 		drvdata->mode = CS_MODE_SYSFS;
298 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
299 
300 	if (!ret)
301 		dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
302 	return ret;
303 }
304 
305 static void tmc_disable_etf_link(struct coresight_device *csdev,
306 				 int inport, int outport)
307 {
308 	unsigned long flags;
309 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
310 
311 	spin_lock_irqsave(&drvdata->spinlock, flags);
312 	if (drvdata->reading) {
313 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
314 		return;
315 	}
316 
317 	tmc_etf_disable_hw(drvdata);
318 	drvdata->mode = CS_MODE_DISABLED;
319 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
320 
321 	dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
322 }
323 
324 static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
325 				  void **pages, int nr_pages, bool overwrite)
326 {
327 	int node;
328 	struct cs_buffers *buf;
329 
330 	if (cpu == -1)
331 		cpu = smp_processor_id();
332 	node = cpu_to_node(cpu);
333 
334 	/* Allocate memory structure for interaction with Perf */
335 	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
336 	if (!buf)
337 		return NULL;
338 
339 	buf->snapshot = overwrite;
340 	buf->nr_pages = nr_pages;
341 	buf->data_pages = pages;
342 
343 	return buf;
344 }
345 
/* Release the perf bookkeeping built by tmc_alloc_etf_buffer(). */
static void tmc_free_etf_buffer(void *config)
{
	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(config);
}
352 
353 static int tmc_set_etf_buffer(struct coresight_device *csdev,
354 			      struct perf_output_handle *handle)
355 {
356 	int ret = 0;
357 	unsigned long head;
358 	struct cs_buffers *buf = etm_perf_sink_config(handle);
359 
360 	if (!buf)
361 		return -EINVAL;
362 
363 	/* wrap head around to the amount of space we have */
364 	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
365 
366 	/* find the page to write to */
367 	buf->cur = head / PAGE_SIZE;
368 
369 	/* and offset within that page */
370 	buf->offset = head % PAGE_SIZE;
371 
372 	local_set(&buf->data_size, 0);
373 
374 	return ret;
375 }
376 
/*
 * Drain the TMC RAM into the perf AUX ring buffer.
 *
 * Stops trace collection, copies up to handle->size bytes of the most
 * recent trace into the pages described by @sink_config, and returns the
 * number of bytes made available to perf (or the full AUX buffer size in
 * snapshot mode).  Returns 0 if there is no sink configuration or the
 * sink is not operated by perf.
 *
 * Runs with the sink in CS_MODE_PERF; the ETM perf layer serializes
 * access via the perf_output_handle.
 */
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	CS_UNLOCK(drvdata->base);

	/* Stop collection so RRP/RWP are stable while we drain */
	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		/* RAM wrapped: the whole RAM is valid but old data was lost */
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		/* Skipping ahead means the oldest trace is dropped */
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		/*
		 * On a wrap/skip, overwrite the first words with barrier
		 * packets so the decoder resynchronizes after lost trace.
		 */
		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we have to update the head.
	 * NOTE(review): 'cur * PAGE_SIZE' is int arithmetic — for very
	 * large AUX buffers this could overflow before widening; confirm
	 * nr_pages bounds with the perf core.
	 */
	if (buf->snapshot) {
		handle->head = (cur * PAGE_SIZE) + offset;
		to_read = buf->nr_pages << PAGE_SHIFT;
	}
	CS_LOCK(drvdata->base);

	return to_read;
}
494 
/* Sink operations shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link operations, only meaningful when the TMC runs as a FIFO (ETF) */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB can only be a sink */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can act as either a sink or a link */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
516 
/*
 * Prepare the ETB/ETF for a sysFS read of the captured trace.
 *
 * Stops the TMC if it is collecting in sysFS mode (salvaging the trace
 * into drvdata->buf via tmc_etb_disable_hw()) and marks the device as
 * being read.  Returns -EBUSY if a read is already in progress and
 * -EINVAL if the TMC is in FIFO mode, operated by perf, or the buffer
 * has already been consumed.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be; this also dumps the RAM into the buf */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
564 
/*
 * Finish a sysFS read session started by tmc_read_prepare_etb().
 *
 * If a sysFS trace session is still active the TMC is re-armed on the
 * same (zeroed) buffer; otherwise the buffer is released.  Returns
 * -EINVAL if the TMC is found in HW FIFO mode, 0 otherwise.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}
617