1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2016 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
7 #include <linux/coresight.h>
8 #include <linux/dma-mapping.h>
9 #include "coresight-priv.h"
10 #include "coresight-tmc.h"
11 
/*
 * tmc_etr_enable_hw - program the ETR for a capture session.
 *
 * Configures the TMC-ETR as a circular buffer draining into the
 * DMA buffer at drvdata->paddr/vaddr and enables capture.  Caller
 * must hold drvdata->spinlock; the coresight lock (CS_LOCK/UNLOCK)
 * is handled here.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* RSZ is programmed in 32-bit words, hence size / 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/* Reprogram AXI write attributes, keeping the untouched fields */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	/* Only touch read-cache attributes where the IP supports them */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, drvdata->paddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		/* Clear the FULL status bit left over from a prior run */
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	/* Enable formatting, flush-on-trigger/flushin and trigger insertion */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
60 
/*
 * tmc_etr_dump_hw - locate the trace data in the DMA buffer.
 *
 * Reads the write pointer (RWP) and status register to work out
 * where valid trace data starts and how much of it there is,
 * recording the result in drvdata->buf and drvdata->len.  Must be
 * called with the TMC stopped (see tmc_etr_disable_hw()).
 */
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	const u32 *barrier;
	u32 val;
	u32 *temp;
	u64 rwp;

	rwp = tmc_read_rwp(drvdata);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		/*
		 * Buffer wrapped: the oldest data starts at RWP.  Translate
		 * the bus address back into our virtual mapping.
		 */
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;

		/*
		 * Overwrite the start of the (oldest) data with barrier
		 * packets so decoders can resynchronise mid-stream.
		 */
		barrier = barrier_pkt;
		temp = (u32 *)drvdata->buf;

		while (*barrier) {
			*temp = *barrier;
			temp++;
			barrier++;
		}

	} else {
		/* No wrap: data runs from the base of the buffer to RWP */
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}
93 
/*
 * tmc_etr_disable_hw - flush and stop the ETR.
 *
 * In sysFS mode the buffer geometry is captured (tmc_etr_dump_hw())
 * before the TMC is disabled, since it is derived from live HW
 * pointers.  Caller must hold drvdata->spinlock.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
109 
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR from the sysFS interface.
 *
 * Allocates the DMA trace buffer on first use (dropping the spinlock
 * around the allocation, since dma_alloc_coherent() may sleep) and
 * enables the hardware unless it is already running or a read is in
 * progress.
 *
 * Returns 0 on success, -EBUSY if a buffer read is in progress, or
 * -ENOMEM if the trace buffer could not be allocated.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous  memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	/* A read is in flight - don't touch the hardware or the buffer */
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::vaddr == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->vaddr == NULL) {
		/* Ownership of vaddr/paddr transfers to drvdata */
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
180 
181 static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
182 {
183 	int ret = 0;
184 	unsigned long flags;
185 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
186 
187 	spin_lock_irqsave(&drvdata->spinlock, flags);
188 	if (drvdata->reading) {
189 		ret = -EINVAL;
190 		goto out;
191 	}
192 
193 	/*
194 	 * In Perf mode there can be only one writer per sink.  There
195 	 * is also no need to continue if the ETR is already operated
196 	 * from sysFS.
197 	 */
198 	if (drvdata->mode != CS_MODE_DISABLED) {
199 		ret = -EINVAL;
200 		goto out;
201 	}
202 
203 	drvdata->mode = CS_MODE_PERF;
204 	tmc_etr_enable_hw(drvdata);
205 out:
206 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
207 
208 	return ret;
209 }
210 
211 static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
212 {
213 	switch (mode) {
214 	case CS_MODE_SYSFS:
215 		return tmc_enable_etr_sink_sysfs(csdev);
216 	case CS_MODE_PERF:
217 		return tmc_enable_etr_sink_perf(csdev);
218 	}
219 
220 	/* We shouldn't be here */
221 	return -EINVAL;
222 }
223 
224 static void tmc_disable_etr_sink(struct coresight_device *csdev)
225 {
226 	unsigned long flags;
227 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
228 
229 	spin_lock_irqsave(&drvdata->spinlock, flags);
230 	if (drvdata->reading) {
231 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
232 		return;
233 	}
234 
235 	/* Disable the TMC only if it needs to */
236 	if (drvdata->mode != CS_MODE_DISABLED) {
237 		tmc_etr_disable_hw(drvdata);
238 		drvdata->mode = CS_MODE_DISABLED;
239 	}
240 
241 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
242 
243 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
244 }
245 
/* Sink operations handed to the coresight core for the ETR */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};
250 
/* Top-level coresight operations for the ETR configuration of the TMC */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
254 
255 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
256 {
257 	int ret = 0;
258 	unsigned long flags;
259 
260 	/* config types are set a boot time and never change */
261 	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
262 		return -EINVAL;
263 
264 	spin_lock_irqsave(&drvdata->spinlock, flags);
265 	if (drvdata->reading) {
266 		ret = -EBUSY;
267 		goto out;
268 	}
269 
270 	/* Don't interfere if operated from Perf */
271 	if (drvdata->mode == CS_MODE_PERF) {
272 		ret = -EINVAL;
273 		goto out;
274 	}
275 
276 	/* If drvdata::buf is NULL the trace data has been read already */
277 	if (drvdata->buf == NULL) {
278 		ret = -EINVAL;
279 		goto out;
280 	}
281 
282 	/* Disable the TMC if need be */
283 	if (drvdata->mode == CS_MODE_SYSFS)
284 		tmc_etr_disable_hw(drvdata);
285 
286 	drvdata->reading = true;
287 out:
288 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
289 
290 	return ret;
291 }
292 
293 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
294 {
295 	unsigned long flags;
296 	dma_addr_t paddr;
297 	void __iomem *vaddr = NULL;
298 
299 	/* config types are set a boot time and never change */
300 	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
301 		return -EINVAL;
302 
303 	spin_lock_irqsave(&drvdata->spinlock, flags);
304 
305 	/* RE-enable the TMC if need be */
306 	if (drvdata->mode == CS_MODE_SYSFS) {
307 		/*
308 		 * The trace run will continue with the same allocated trace
309 		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
310 		 * so we don't have to explicitly clear it. Also, since the
311 		 * tracer is still enabled drvdata::buf can't be NULL.
312 		 */
313 		tmc_etr_enable_hw(drvdata);
314 	} else {
315 		/*
316 		 * The ETR is not tracing and the buffer was just read.
317 		 * As such prepare to free the trace buffer.
318 		 */
319 		vaddr = drvdata->vaddr;
320 		paddr = drvdata->paddr;
321 		drvdata->buf = drvdata->vaddr = NULL;
322 	}
323 
324 	drvdata->reading = false;
325 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
326 
327 	/* Free allocated memory out side of the spinlock */
328 	if (vaddr)
329 		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
330 
331 	return 0;
332 }
333