// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

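/* Wait for the TMC to drain and report ready (TMCReady bit in TMC_STS). */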
void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;

	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&csdev->dev,
			"timeout while waiting for TMC to be Ready\n");
	}
}

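/*
 * Stop capture cleanly: set StopOnFl, request a manual flush, then wait for
 * both the flush and the TMC itself to complete.
 */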
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&csdev->dev,
		"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}

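/* Turn trace capture on/off via the TMC control register. */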
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * When moving RRP or an offset address forward, the new values must
	 * be byte-address aligned to the width of the trace memory databus
	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
	 * be 0s.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	case TMC_MEM_INTF_WIDTH_64BITS:
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}

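/*
 * Put the trace buffer into (or take it out of) reading state for the misc
 * device, dispatching on whether this TMC is an ETB/ETF or an ETR.
 */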
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_prepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_prepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

	return ret;
}

static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_unprepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_unprepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

	return ret;
}

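/* open() on the misc device: prepare the TMC buffer for reading. */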
static int tmc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;

	nonseekable_open(inode, file);

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
					  loff_t pos, size_t len, char **bufpp)
{
	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
	case TMC_CONFIG_TYPE_ETR:
		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
	}

	return -EINVAL;
}

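/*
 * read() on the misc device: copy the next chunk of captured trace to
 * userspace. Returning 0 signals end of data (or an internal error).
 */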
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}

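/* release() on the misc device: take the TMC buffer out of reading state. */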
static int tmc_release(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_unprepare(drvdata);
	if (ret)
		return ret;

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
	enum tmc_mem_intf_width memwidth;

	/*
	 * Excerpt from the TRM:
	 *
	 * DEVID::MEMWIDTH[10:8]
	 * 0x2 Memory interface databus is 32 bits wide.
	 * 0x3 Memory interface databus is 64 bits wide.
	 * 0x4 Memory interface databus is 128 bits wide.
	 * 0x5 Memory interface databus is 256 bits wide.
	 */
	switch (BMVAL(devid, 8, 10)) {
	case 0x2:
		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
		break;
	case 0x3:
		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
		break;
	case 0x4:
		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
		break;
	case 0x5:
		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
		break;
	default:
		memwidth = 0;
	}

	return memwidth;
}

#define coresight_tmc_reg(name, offset)			\
	coresight_simple_reg32(struct tmc_drvdata, name, offset)
#define coresight_tmc_reg64(name, lo_off, hi_off)	\
	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)

coresight_tmc_reg(rsz, TMC_RSZ);
coresight_tmc_reg(sts, TMC_STS);
coresight_tmc_reg(trg, TMC_TRG);
coresight_tmc_reg(ctl, TMC_CTL);
coresight_tmc_reg(ffsr, TMC_FFSR);
coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);

static struct attribute *coresight_tmc_mgmt_attrs[] = {
	&dev_attr_rsz.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	&dev_attr_mode.attr,
	&dev_attr_pscr.attr,
	&dev_attr_devid.attr,
	&dev_attr_dba.attr,
	&dev_attr_axictl.attr,
	&dev_attr_authstatus.attr,
	NULL,
};

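/* trigger_cntr: amount of trace to capture after a trigger event. */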
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

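/* buffer_size: size of the system-memory trace buffer, TMC-ETR only. */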
static ssize_t buffer_size_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* Only permitted for TMC-ETRs */
	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
		return -EPERM;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;
	/* The buffer size should be page aligned */
	if (val & (PAGE_SIZE - 1))
		return -EINVAL;
	drvdata->size = val;
	return size;
}

static DEVICE_ATTR_RW(buffer_size);

static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_buffer_size.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_tmc_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

static inline bool tmc_etr_can_use_sg(struct device *dev)
{
	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}

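/* Non-secure invasive debug must be implemented and enabled (NSID == 0b11). */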
static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
	int rc;
	u32 dma_mask = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);

	if (!tmc_etr_has_non_secure_access(drvdata))
		return -EACCES;

	/* Set the unadvertised capabilities */
	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
		tmc_etr_set_cap(drvdata, TMC_ETR_SG);

	/* Check if the AXI address width is available */
	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
				TMC_DEVID_AXIAW_MASK);

	/*
	 * Unless specified in the device configuration, ETR uses a 40-bit
	 * AXI master in place of the embedded SRAM of ETB/ETF.
	 */
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
		dev_info(parent, "Detected DMA mask %d bits\n", dma_mask);
		break;
	default:
		dma_mask = 40;
	}

	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
	if (rc)
		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
	return rc;
}

static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
	u32 size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
		size = SZ_1M;
	return size;
}

static u32 tmc_etr_get_max_burst_size(struct device *dev)
{
	u32 burst_size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
				     &burst_size))
		return TMC_AXICTL_WR_BURST_16;

	/* Only permissible values are 0 to 15 */
	if (burst_size > 0xF)
		burst_size = TMC_AXICTL_WR_BURST_16;

	return burst_size;
}

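/*
 * Probe an AMBA TMC device: map its registers, work out from DEVID whether
 * it is an ETB, ETF or ETR, register it with the coresight core and expose
 * a misc device for reading the captured trace.
 */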
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct coresight_dev_list *dev_list = NULL;

	ret = -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		goto out;

	dev_set_drvdata(dev, drvdata);

	/* The resource has already been validated by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);
	/* This device is not associated with a session */
	drvdata->pid = -1;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	desc.dev = dev;
	desc.groups = coresight_tmc_groups;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		dev_list = &etb_devs;
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(dev, devid,
					 coresight_get_uci_data(id));
		if (ret)
			goto out;
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		dev_list = &etr_devs;
		break;
	case TMC_CONFIG_TYPE_ETF:
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		dev_list = &etf_devs;
		break;
	default:
		dev_err(dev, "Unsupported TMC config\n");
		ret = -EINVAL;
		goto out;
	}

	desc.name = coresight_alloc_device_name(dev_list, dev);
	if (!desc.name) {
		ret = -ENOMEM;
		goto out;
	}

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	adev->dev.platform_data = pdata;
	desc.pdata = pdata;

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		coresight_unregister(drvdata->csdev);
	else
		pm_runtime_put(&adev->dev);
out:
	return ret;
}

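/* On shutdown, stop an active ETR session so its DMA is quiesced. */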
static void tmc_shutdown(struct amba_device *adev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->mode == CS_MODE_DISABLED)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		tmc_etr_disable_hw(drvdata);

	/*
	 * Unlike the remove callback, which must unregister the coresight
	 * device so the driver can be built as a module, there is no need
	 * to unregister here because the system is going down after this.
	 */
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static void tmc_remove(struct amba_device *adev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops (tmc_fops here),
	 * the device stays around until the last file handle on it is
	 * closed.
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}

static const struct amba_id tmc_ids[] = {
	CS_AMBA_ID(0x000bb961),
	/* Coresight SoC 600 TMC-ETR/ETS */
	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
	/* Coresight SoC 600 TMC-ETB */
	CS_AMBA_ID(0x000bb9e9),
	/* Coresight SoC 600 TMC-ETF */
	CS_AMBA_ID(0x000bb9ea),
	{ 0, 0},
};

MODULE_DEVICE_TABLE(amba, tmc_ids);

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.shutdown	= tmc_shutdown,
	.remove		= tmc_remove,
	.id_table	= tmc_ids,
};

module_amba_driver(tmc_driver);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
MODULE_LICENSE("GPL v2");