xref: /openbmc/linux/drivers/hwtracing/coresight/coresight-tmc-etr.c (revision 93707cbabcc8baf2b2b5f4a99c1f08ee83eb7abd)
1 /*
2  * Copyright(C) 2016 Linaro Limited. All rights reserved.
3  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include <linux/coresight.h>
19 #include <linux/dma-mapping.h>
20 #include "coresight-priv.h"
21 #include "coresight-tmc.h"
22 
/*
 * tmc_etr_enable_hw - configure and start the TMC-ETR in circular
 * buffer mode.
 *
 * Callers hold drvdata->spinlock and must have a valid DMA trace buffer
 * of drvdata->size bytes set up in drvdata->vaddr / drvdata->paddr.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* The RAM size register is programmed in 32-bit words, hence / 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/*
	 * AXI control: clear the programmable fields, then set protection
	 * control bit 1, 16-beat write bursts and outer-shareable write
	 * cacheability (per the macro names - see the TMC TRM for the
	 * exact field encodings).
	 */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	/* Only program read cacheability on ETRs that implement it */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	/* Point the ETR at the base of the trace buffer */
	tmc_write_dba(drvdata, drvdata->paddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	/* Formatter/flush control, trigger counter, then start capture */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
71 
/*
 * tmc_etr_dump_hw - work out where the captured trace data starts and
 * how much of it there is, recording the result in drvdata->buf/len.
 *
 * Called from tmc_etr_disable_hw() after tmc_flush_and_stop(), so the
 * write pointer (RWP) is stable when it is read here.
 */
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	const u32 *barrier;
	u32 val;
	u32 *temp;
	u64 rwp;

	rwp = tmc_read_rwp(drvdata);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		/*
		 * The buffer wrapped: the oldest data starts at RWP.  RWP
		 * is a bus address, so convert it to an offset from the
		 * buffer base (paddr) to index the CPU mapping (vaddr).
		 */
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;

		/*
		 * Stamp a barrier packet over the start of the (wrapped)
		 * data so that decoders can re-synchronise at that point.
		 * NOTE(review): this assumes barrier_pkt fits between RWP
		 * and the end of the buffer - confirm for small buffers.
		 */
		barrier = barrier_pkt;
		temp = (u32 *)drvdata->buf;

		/* barrier_pkt is zero-terminated */
		while (*barrier) {
			*temp = *barrier;
			temp++;
			barrier++;
		}

	} else {
		/* No wrap: valid data runs from the buffer base up to RWP */
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}
104 
/*
 * tmc_etr_disable_hw - stop trace collection on the ETR.
 *
 * The TMC is flushed and stopped before the buffer state is captured
 * (sysFS mode) and before the block is disabled, so the ordering of the
 * calls below must be preserved.  Callers hold drvdata->spinlock.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
120 
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR as a sink from sysFS.
 *
 * The contiguous DMA trace buffer is allocated lazily on first enable.
 * Because dma_alloc_coherent() may sleep, the spinlock is dropped
 * around the allocation and all state is re-validated once the lock is
 * re-taken; an allocation that turns out to be unneeded (another
 * session raced in, or an error path was taken) is freed after the
 * lock is released.
 *
 * Returns 0 on success, -EBUSY if a buffer read is in progress, or
 * -ENOMEM if the trace buffer cannot be allocated.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;		/* did this call's allocation get adopted? */
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);


	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous  memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	/* A read of the buffer owns the device - don't restart tracing */
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
192 
193 static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
194 {
195 	int ret = 0;
196 	unsigned long flags;
197 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
198 
199 	spin_lock_irqsave(&drvdata->spinlock, flags);
200 	if (drvdata->reading) {
201 		ret = -EINVAL;
202 		goto out;
203 	}
204 
205 	/*
206 	 * In Perf mode there can be only one writer per sink.  There
207 	 * is also no need to continue if the ETR is already operated
208 	 * from sysFS.
209 	 */
210 	if (drvdata->mode != CS_MODE_DISABLED) {
211 		ret = -EINVAL;
212 		goto out;
213 	}
214 
215 	drvdata->mode = CS_MODE_PERF;
216 	tmc_etr_enable_hw(drvdata);
217 out:
218 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
219 
220 	return ret;
221 }
222 
223 static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
224 {
225 	switch (mode) {
226 	case CS_MODE_SYSFS:
227 		return tmc_enable_etr_sink_sysfs(csdev);
228 	case CS_MODE_PERF:
229 		return tmc_enable_etr_sink_perf(csdev);
230 	}
231 
232 	/* We shouldn't be here */
233 	return -EINVAL;
234 }
235 
236 static void tmc_disable_etr_sink(struct coresight_device *csdev)
237 {
238 	unsigned long flags;
239 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
240 
241 	spin_lock_irqsave(&drvdata->spinlock, flags);
242 	if (drvdata->reading) {
243 		spin_unlock_irqrestore(&drvdata->spinlock, flags);
244 		return;
245 	}
246 
247 	/* Disable the TMC only if it needs to */
248 	if (drvdata->mode != CS_MODE_DISABLED) {
249 		tmc_etr_disable_hw(drvdata);
250 		drvdata->mode = CS_MODE_DISABLED;
251 	}
252 
253 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
254 
255 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
256 }
257 
/* Sink operations registered with the coresight core */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

/* Only sink operations are provided for the ETR */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
266 
267 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
268 {
269 	int ret = 0;
270 	unsigned long flags;
271 
272 	/* config types are set a boot time and never change */
273 	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
274 		return -EINVAL;
275 
276 	spin_lock_irqsave(&drvdata->spinlock, flags);
277 	if (drvdata->reading) {
278 		ret = -EBUSY;
279 		goto out;
280 	}
281 
282 	/* Don't interfere if operated from Perf */
283 	if (drvdata->mode == CS_MODE_PERF) {
284 		ret = -EINVAL;
285 		goto out;
286 	}
287 
288 	/* If drvdata::buf is NULL the trace data has been read already */
289 	if (drvdata->buf == NULL) {
290 		ret = -EINVAL;
291 		goto out;
292 	}
293 
294 	/* Disable the TMC if need be */
295 	if (drvdata->mode == CS_MODE_SYSFS)
296 		tmc_etr_disable_hw(drvdata);
297 
298 	drvdata->reading = true;
299 out:
300 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
301 
302 	return ret;
303 }
304 
305 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
306 {
307 	unsigned long flags;
308 	dma_addr_t paddr;
309 	void __iomem *vaddr = NULL;
310 
311 	/* config types are set a boot time and never change */
312 	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
313 		return -EINVAL;
314 
315 	spin_lock_irqsave(&drvdata->spinlock, flags);
316 
317 	/* RE-enable the TMC if need be */
318 	if (drvdata->mode == CS_MODE_SYSFS) {
319 		/*
320 		 * The trace run will continue with the same allocated trace
321 		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
322 		 * so we don't have to explicitly clear it. Also, since the
323 		 * tracer is still enabled drvdata::buf can't be NULL.
324 		 */
325 		tmc_etr_enable_hw(drvdata);
326 	} else {
327 		/*
328 		 * The ETR is not tracing and the buffer was just read.
329 		 * As such prepare to free the trace buffer.
330 		 */
331 		vaddr = drvdata->vaddr;
332 		paddr = drvdata->paddr;
333 		drvdata->buf = drvdata->vaddr = NULL;
334 	}
335 
336 	drvdata->reading = false;
337 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
338 
339 	/* Free allocated memory out side of the spinlock */
340 	if (vaddr)
341 		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
342 
343 	return 0;
344 }
345