/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for the TMCReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

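	/*
	 * The RAM size register is expressed in 32-bit words, hence the
	 * division by 4.  Circular buffer mode makes the ETR wrap around
	 * when it reaches the end of the buffer.
	 */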
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

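	/*
	 * Configure the AXI master: write bursts of up to 16 beats, no
	 * scatter-gather (the DMA buffer is physically contiguous), and
	 * the protection control bits cleared before setting bit 1 so
	 * that transactions are issued as non-secure.
	 */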
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_16;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

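	/*
	 * Program the base address of the trace buffer.  Only the low
	 * 32 bits are used and the high word is zeroed, so the buffer
	 * is expected to sit in the lower 4GB of the address space.
	 */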
	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
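	/*
	 * Enable the formatter, insert triggers in the trace stream,
	 * flush on FLUSHIN and on trigger events, and use TRIGIN as a
	 * trigger source.
	 */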
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
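	/* Amount of trace, in 32-bit words, to capture after a trigger */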
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

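	/*
	 * For the ETR the RAM write pointer is an address in system
	 * memory, i.e. in the same address space as drvdata->paddr.
	 */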
	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.  If the buffer has wrapped
	 * (FULL is set), the oldest data starts at the write pointer and
	 * the whole buffer is valid; otherwise valid data starts at the
	 * base of the buffer and ends at the write pointer.
	 */
	if (val & TMC_STS_FULL) {
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;
	} else {
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such, allocate memory here and free it below if
		 * a buffer has already been allocated (from a previous
		 * session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled, no memory is needed and the HW need not
	 * be touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
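		/*
		 * Adopt the buffer allocated above; flagging it as used
		 * prevents it from being freed once the lock is released.
		 */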
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * In Perf mode there can be only one writer per sink.  There
	 * is also no need to continue if the ETR is already operated
	 * from sysFS.
	 */
	if (drvdata->mode != CS_MODE_DISABLED) {
		ret = -EINVAL;
		goto out;
	}

	drvdata->mode = CS_MODE_PERF;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to be disabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
		 * so we don't have to explicitly clear it. Also, since the
		 * tracer is still enabled drvdata::buf can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such, prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}