// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

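/*
 * Set up the TMC for circular-buffer (ETB) operation: wait for the TMC
 * to signal ready, program the mode, formatter/flush control and
 * trigger counter registers, then turn capture on.
 */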
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etb_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

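/*
 * Drain the TMC RAM into drvdata->buf, one 32-bit word at a time,
 * through the RAM Read Data (RRD) register. If the buffer wrapped
 * around, a barrier packet is inserted at the start of the buffer so
 * decoders can re-synchronise over the discontinuity.
 */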
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
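		/* An RRD read returns 0xFFFFFFFF once no more data is available */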
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

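/*
 * Set up the TMC for hardware FIFO (ETF link) operation so trace flows
 * through to the next component in the path: wait for ready, select HW
 * FIFO mode, enable formatting and trigger insertion, and clear the
 * buffer watermark before turning capture on.
 */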
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC is not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etf_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating the @bufpp on where to
 * find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the length to the data available from @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

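/*
 * Enable the ETB/ETF as a sysFS sink. The trace buffer is allocated
 * lazily, outside of the spinlock, and reused across enables until its
 * content is read out through the /dev/xyz.{etf|etb} interface.
 */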
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(&csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(&csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

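/*
 * Enable the ETB/ETF as a perf sink. A sink can only serve one process
 * at a time; drvdata->pid records the owner so that a concurrent perf
 * session targeting the same sink is refused with -EBUSY.
 */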
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(&csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(&csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       enum cs_mode mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(&csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

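/*
 * Enable the ETF as a link in a trace path. The hardware is only
 * programmed on the first enable; subsequent calls simply bump the
 * reference count.
 */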
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       struct coresight_connection *in,
			       struct coresight_connection *out)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_read(&csdev->refcnt) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 struct coresight_connection *in,
				 struct coresight_connection *out)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	if (atomic_dec_return(&csdev->refcnt) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

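/*
 * Allocate the per-event bookkeeping structure that ties the perf AUX
 * ring buffer pages to this sink, preferably on the node of the CPU
 * being traced.
 */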
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

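/*
 * Convert the current perf AUX head into a page index and an offset
 * within that page, so tmc_update_etf_buffer() knows where to start
 * writing trace data.
 */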
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

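/*
 * Called when tracing stops: flush and stop the TMC, copy what it
 * captured into the perf AUX ring buffer pages and return the number
 * of bytes written.
 */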
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(&csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* Read the trace data out, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written. User space will figure out how many bytes
	 * to get from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};

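/*
 * Prepare the ETB/ETF for a sysFS read: stop the hardware (dumping its
 * RAM into drvdata->buf on the way down) and mark the device as being
 * read so it can't be re-enabled until the read completes.
 */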
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

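/*
 * End a sysFS read session: re-arm the hardware if a trace run is
 * still in progress, otherwise release the trace buffer now that its
 * contents have been consumed.
 */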
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;
	int rc = 0;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		rc = __tmc_etb_enable_hw(drvdata);
		if (rc) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return rc;
		}
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}