1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(C) 2015 Linaro Limited. All rights reserved.
4  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5  */
6 
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 
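/*
 * Set the include/exclude bit in the ViewInst Include/Exclude Control
 * register (viiectlr) for the address comparator pair that contains the
 * currently selected comparator.  Only valid when the selected comparator
 * is the even (low) half of an instruction address range pair.
 */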
13 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
14 {
15 	u8 idx;
16 	struct etmv4_config *config = &drvdata->config;
17 
18 	idx = config->addr_idx;
19 
20 	/*
	 * TRCACATRn.TYPE bits[1:0]: type of comparison
22 	 * the trace unit performs
23 	 */
24 	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
25 		if (idx % 2 != 0)
26 			return -EINVAL;
27 
28 		/*
29 		 * We are performing instruction address comparison. Set the
30 		 * relevant bit of ViewInst Include/Exclude Control register
31 		 * for corresponding address comparator pair.
32 		 */
33 		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
34 		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
35 			return -EINVAL;
36 
		if (exclude) {
38 			/*
39 			 * Set exclude bit and unset the include bit
40 			 * corresponding to comparator pair
41 			 */
42 			config->viiectlr |= BIT(idx / 2 + 16);
43 			config->viiectlr &= ~BIT(idx / 2);
44 		} else {
45 			/*
46 			 * Set include bit and unset exclude bit
47 			 * corresponding to comparator pair
48 			 */
49 			config->viiectlr |= BIT(idx / 2);
50 			config->viiectlr &= ~BIT(idx / 2 + 16);
51 		}
52 	}
53 	return 0;
54 }
55 
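/*
 * The read-only attributes below report the number of implemented
 * resources of each kind (comparators, counters, external inputs,
 * sequencer states, resource selectors, ...) for this trace unit.
 */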
56 static ssize_t nr_pe_cmp_show(struct device *dev,
57 			      struct device_attribute *attr,
58 			      char *buf)
59 {
60 	unsigned long val;
61 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
62 
63 	val = drvdata->nr_pe_cmp;
64 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
65 }
66 static DEVICE_ATTR_RO(nr_pe_cmp);
67 
68 static ssize_t nr_addr_cmp_show(struct device *dev,
69 				struct device_attribute *attr,
70 				char *buf)
71 {
72 	unsigned long val;
73 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
74 
75 	val = drvdata->nr_addr_cmp;
76 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
77 }
78 static DEVICE_ATTR_RO(nr_addr_cmp);
79 
80 static ssize_t nr_cntr_show(struct device *dev,
81 			    struct device_attribute *attr,
82 			    char *buf)
83 {
84 	unsigned long val;
85 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
86 
87 	val = drvdata->nr_cntr;
88 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
89 }
90 static DEVICE_ATTR_RO(nr_cntr);
91 
92 static ssize_t nr_ext_inp_show(struct device *dev,
93 			       struct device_attribute *attr,
94 			       char *buf)
95 {
96 	unsigned long val;
97 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
98 
99 	val = drvdata->nr_ext_inp;
100 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
101 }
102 static DEVICE_ATTR_RO(nr_ext_inp);
103 
104 static ssize_t numcidc_show(struct device *dev,
105 			    struct device_attribute *attr,
106 			    char *buf)
107 {
108 	unsigned long val;
109 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
110 
111 	val = drvdata->numcidc;
112 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
113 }
114 static DEVICE_ATTR_RO(numcidc);
115 
116 static ssize_t numvmidc_show(struct device *dev,
117 			     struct device_attribute *attr,
118 			     char *buf)
119 {
120 	unsigned long val;
121 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
122 
123 	val = drvdata->numvmidc;
124 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
125 }
126 static DEVICE_ATTR_RO(numvmidc);
127 
128 static ssize_t nrseqstate_show(struct device *dev,
129 			       struct device_attribute *attr,
130 			       char *buf)
131 {
132 	unsigned long val;
133 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
134 
135 	val = drvdata->nrseqstate;
136 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
137 }
138 static DEVICE_ATTR_RO(nrseqstate);
139 
140 static ssize_t nr_resource_show(struct device *dev,
141 				struct device_attribute *attr,
142 				char *buf)
143 {
144 	unsigned long val;
145 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
146 
147 	val = drvdata->nr_resource;
148 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
149 }
150 static DEVICE_ATTR_RO(nr_resource);
151 
152 static ssize_t nr_ss_cmp_show(struct device *dev,
153 			      struct device_attribute *attr,
154 			      char *buf)
155 {
156 	unsigned long val;
157 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
158 
159 	val = drvdata->nr_ss_cmp;
160 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
161 }
162 static DEVICE_ATTR_RO(nr_ss_cmp);
163 
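/*
 * Writing to this attribute restores the default configuration:
 * instruction-only tracing with no address, context ID or VMID filtering,
 * and all counter, sequencer, resource selector and single-shot settings
 * cleared.  A non-zero value additionally clears the 'mode' flags.
 */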
164 static ssize_t reset_store(struct device *dev,
165 			   struct device_attribute *attr,
166 			   const char *buf, size_t size)
167 {
168 	int i;
169 	unsigned long val;
170 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
171 	struct etmv4_config *config = &drvdata->config;
172 
173 	if (kstrtoul(buf, 16, &val))
174 		return -EINVAL;
175 
176 	spin_lock(&drvdata->spinlock);
177 	if (val)
178 		config->mode = 0x0;
179 
180 	/* Disable data tracing: do not trace load and store data transfers */
181 	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
182 	config->cfg &= ~(BIT(1) | BIT(2));
183 
184 	/* Disable data value and data address tracing */
185 	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
186 			   ETM_MODE_DATA_TRACE_VAL);
187 	config->cfg &= ~(BIT(16) | BIT(17));
188 
189 	/* Disable all events tracing */
190 	config->eventctrl0 = 0x0;
191 	config->eventctrl1 = 0x0;
192 
193 	/* Disable timestamp event */
194 	config->ts_ctrl = 0x0;
195 
196 	/* Disable stalling */
197 	config->stall_ctrl = 0x0;
198 
	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (!drvdata->syncpr)
201 		config->syncfreq = 0x8;
202 
203 	/*
204 	 * Enable ViewInst to trace everything with start-stop logic in
205 	 * started state. ARM recommends start-stop logic is set before
206 	 * each trace run.
207 	 */
208 	config->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp > 0) {
210 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
211 		/* SSSTATUS, bit[9] */
212 		config->vinst_ctrl |= BIT(9);
213 	}
214 
215 	/* No address range filtering for ViewInst */
216 	config->viiectlr = 0x0;
217 
218 	/* No start-stop filtering for ViewInst */
219 	config->vissctlr = 0x0;
220 	config->vipcssctlr = 0x0;
221 
222 	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
224 		config->seq_ctrl[i] = 0x0;
225 	config->seq_rst = 0x0;
226 	config->seq_state = 0x0;
227 
228 	/* Disable external input events */
229 	config->ext_inp = 0x0;
230 
231 	config->cntr_idx = 0x0;
232 	for (i = 0; i < drvdata->nr_cntr; i++) {
233 		config->cntrldvr[i] = 0x0;
234 		config->cntr_ctrl[i] = 0x0;
235 		config->cntr_val[i] = 0x0;
236 	}
237 
238 	config->res_idx = 0x0;
239 	for (i = 0; i < drvdata->nr_resource; i++)
240 		config->res_ctrl[i] = 0x0;
241 
242 	config->ss_idx = 0x0;
243 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
244 		config->ss_ctrl[i] = 0x0;
245 		config->ss_pe_cmp[i] = 0x0;
246 	}
247 
248 	config->addr_idx = 0x0;
249 	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
250 		config->addr_val[i] = 0x0;
251 		config->addr_acc[i] = 0x0;
252 		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
253 	}
254 
255 	config->ctxid_idx = 0x0;
256 	for (i = 0; i < drvdata->numcidc; i++)
257 		config->ctxid_pid[i] = 0x0;
258 
259 	config->ctxid_mask0 = 0x0;
260 	config->ctxid_mask1 = 0x0;
261 
262 	config->vmid_idx = 0x0;
263 	for (i = 0; i < drvdata->numvmidc; i++)
264 		config->vmid_val[i] = 0x0;
265 	config->vmid_mask0 = 0x0;
266 	config->vmid_mask1 = 0x0;
267 
268 	drvdata->trcid = drvdata->cpu + 1;
269 
270 	spin_unlock(&drvdata->spinlock);
271 
272 	return size;
273 }
274 static DEVICE_ATTR_WO(reset);
275 
276 static ssize_t mode_show(struct device *dev,
277 			 struct device_attribute *attr,
278 			 char *buf)
279 {
280 	unsigned long val;
281 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
282 	struct etmv4_config *config = &drvdata->config;
283 
284 	val = config->mode;
285 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
286 }
287 
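/*
 * Apply the 'mode' bitmask to the configuration registers.  Feature bits
 * that depend on optional hardware (branch broadcast, cycle counting,
 * return stack, ...) are only honoured when the implementation supports
 * them.
 */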
288 static ssize_t mode_store(struct device *dev,
289 			  struct device_attribute *attr,
290 			  const char *buf, size_t size)
291 {
292 	unsigned long val, mode;
293 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
294 	struct etmv4_config *config = &drvdata->config;
295 
296 	if (kstrtoul(buf, 16, &val))
297 		return -EINVAL;
298 
299 	spin_lock(&drvdata->spinlock);
300 	config->mode = val & ETMv4_MODE_ALL;
301 
	if (drvdata->instrp0) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= BIT(1) | BIT(2);
317 	}
318 
319 	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && drvdata->trcbb)
321 		config->cfg |= BIT(3);
322 	else
323 		config->cfg &= ~BIT(3);
324 
325 	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) && drvdata->trccci)
328 		config->cfg |= BIT(4);
329 	else
330 		config->cfg &= ~BIT(4);
331 
332 	/* bit[6], Context ID tracing bit */
333 	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
334 		config->cfg |= BIT(6);
335 	else
336 		config->cfg &= ~BIT(6);
337 
338 	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
339 		config->cfg |= BIT(7);
340 	else
341 		config->cfg &= ~BIT(7);
342 
343 	/* bits[10:8], Conditional instruction tracing bit */
344 	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond) {
346 		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
347 		config->cfg |= mode << 8;
348 	}
349 
350 	/* bit[11], Global timestamp tracing bit */
351 	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
352 		config->cfg |= BIT(11);
353 	else
354 		config->cfg &= ~BIT(11);
355 
356 	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) && drvdata->retstack)
359 		config->cfg |= BIT(12);
360 	else
361 		config->cfg &= ~BIT(12);
362 
363 	/* bits[14:13], Q element enable field */
364 	mode = ETM_MODE_QELEM(config->mode);
365 	/* start by clearing QE bits */
366 	config->cfg &= ~(BIT(13) | BIT(14));
367 	/* if supported, Q elements with instruction counts are enabled */
368 	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
369 		config->cfg |= BIT(13);
370 	/*
371 	 * if supported, Q elements with and without instruction
372 	 * counts are enabled
373 	 */
374 	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
375 		config->cfg |= BIT(14);
376 
377 	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) && drvdata->atbtrig)
380 		config->eventctrl1 |= BIT(11);
381 	else
382 		config->eventctrl1 &= ~BIT(11);
383 
384 	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) && drvdata->lpoverride)
387 		config->eventctrl1 |= BIT(12);
388 	else
389 		config->eventctrl1 &= ~BIT(12);
390 
391 	/* bit[8], Instruction stall bit */
392 	if (config->mode & ETM_MODE_ISTALL_EN)
393 		config->stall_ctrl |= BIT(8);
394 	else
395 		config->stall_ctrl &= ~BIT(8);
396 
397 	/* bit[10], Prioritize instruction trace bit */
398 	if (config->mode & ETM_MODE_INSTPRIO)
399 		config->stall_ctrl |= BIT(10);
400 	else
401 		config->stall_ctrl &= ~BIT(10);
402 
403 	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) && drvdata->nooverflow)
406 		config->stall_ctrl |= BIT(13);
407 	else
408 		config->stall_ctrl &= ~BIT(13);
409 
410 	/* bit[9] Start/stop logic control bit */
411 	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
412 		config->vinst_ctrl |= BIT(9);
413 	else
414 		config->vinst_ctrl &= ~BIT(9);
415 
416 	/* bit[10], Whether a trace unit must trace a Reset exception */
417 	if (config->mode & ETM_MODE_TRACE_RESET)
418 		config->vinst_ctrl |= BIT(10);
419 	else
420 		config->vinst_ctrl &= ~BIT(10);
421 
422 	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) && drvdata->trc_error)
425 		config->vinst_ctrl |= BIT(11);
426 	else
427 		config->vinst_ctrl &= ~BIT(11);
428 
429 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
430 		etm4_config_trace_mode(config);
431 
432 	spin_unlock(&drvdata->spinlock);
433 
434 	return size;
435 }
436 static DEVICE_ATTR_RW(mode);
437 
438 static ssize_t pe_show(struct device *dev,
439 		       struct device_attribute *attr,
440 		       char *buf)
441 {
442 	unsigned long val;
443 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
444 	struct etmv4_config *config = &drvdata->config;
445 
446 	val = config->pe_sel;
447 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
448 }
449 
450 static ssize_t pe_store(struct device *dev,
451 			struct device_attribute *attr,
452 			const char *buf, size_t size)
453 {
454 	unsigned long val;
455 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
456 	struct etmv4_config *config = &drvdata->config;
457 
458 	if (kstrtoul(buf, 16, &val))
459 		return -EINVAL;
460 
461 	spin_lock(&drvdata->spinlock);
462 	if (val > drvdata->nr_pe) {
463 		spin_unlock(&drvdata->spinlock);
464 		return -EINVAL;
465 	}
466 
467 	config->pe_sel = val;
468 	spin_unlock(&drvdata->spinlock);
469 	return size;
470 }
471 static DEVICE_ATTR_RW(pe);
472 
473 static ssize_t event_show(struct device *dev,
474 			  struct device_attribute *attr,
475 			  char *buf)
476 {
477 	unsigned long val;
478 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
479 	struct etmv4_config *config = &drvdata->config;
480 
481 	val = config->eventctrl0;
482 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
483 }
484 
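/*
 * Program the event selectors in eventctrl0.  The number of usable EVENT
 * fields depends on how many events the implementation supports, so the
 * unsupported upper bytes of the written value are masked off.
 */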
485 static ssize_t event_store(struct device *dev,
486 			   struct device_attribute *attr,
487 			   const char *buf, size_t size)
488 {
489 	unsigned long val;
490 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
491 	struct etmv4_config *config = &drvdata->config;
492 
493 	if (kstrtoul(buf, 16, &val))
494 		return -EINVAL;
495 
496 	spin_lock(&drvdata->spinlock);
497 	switch (drvdata->nr_event) {
498 	case 0x0:
499 		/* EVENT0, bits[7:0] */
500 		config->eventctrl0 = val & 0xFF;
501 		break;
502 	case 0x1:
		/* EVENT1, bits[15:8] */
504 		config->eventctrl0 = val & 0xFFFF;
505 		break;
506 	case 0x2:
507 		/* EVENT2, bits[23:16] */
508 		config->eventctrl0 = val & 0xFFFFFF;
509 		break;
510 	case 0x3:
511 		/* EVENT3, bits[31:24] */
512 		config->eventctrl0 = val;
513 		break;
514 	default:
515 		break;
516 	}
517 	spin_unlock(&drvdata->spinlock);
518 	return size;
519 }
520 static DEVICE_ATTR_RW(event);
521 
522 static ssize_t event_instren_show(struct device *dev,
523 				  struct device_attribute *attr,
524 				  char *buf)
525 {
526 	unsigned long val;
527 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
528 	struct etmv4_config *config = &drvdata->config;
529 
530 	val = BMVAL(config->eventctrl1, 0, 3);
531 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
532 }
533 
534 static ssize_t event_instren_store(struct device *dev,
535 				   struct device_attribute *attr,
536 				   const char *buf, size_t size)
537 {
538 	unsigned long val;
539 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
540 	struct etmv4_config *config = &drvdata->config;
541 
542 	if (kstrtoul(buf, 16, &val))
543 		return -EINVAL;
544 
545 	spin_lock(&drvdata->spinlock);
546 	/* start by clearing all instruction event enable bits */
547 	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
548 	switch (drvdata->nr_event) {
549 	case 0x0:
550 		/* generate Event element for event 1 */
551 		config->eventctrl1 |= val & BIT(1);
552 		break;
553 	case 0x1:
554 		/* generate Event element for event 1 and 2 */
555 		config->eventctrl1 |= val & (BIT(0) | BIT(1));
556 		break;
557 	case 0x2:
558 		/* generate Event element for event 1, 2 and 3 */
559 		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
560 		break;
561 	case 0x3:
562 		/* generate Event element for all 4 events */
563 		config->eventctrl1 |= val & 0xF;
564 		break;
565 	default:
566 		break;
567 	}
568 	spin_unlock(&drvdata->spinlock);
569 	return size;
570 }
571 static DEVICE_ATTR_RW(event_instren);
572 
573 static ssize_t event_ts_show(struct device *dev,
574 			     struct device_attribute *attr,
575 			     char *buf)
576 {
577 	unsigned long val;
578 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
579 	struct etmv4_config *config = &drvdata->config;
580 
581 	val = config->ts_ctrl;
582 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
583 }
584 
585 static ssize_t event_ts_store(struct device *dev,
586 			      struct device_attribute *attr,
587 			      const char *buf, size_t size)
588 {
589 	unsigned long val;
590 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
591 	struct etmv4_config *config = &drvdata->config;
592 
593 	if (kstrtoul(buf, 16, &val))
594 		return -EINVAL;
595 	if (!drvdata->ts_size)
596 		return -EINVAL;
597 
598 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
599 	return size;
600 }
601 static DEVICE_ATTR_RW(event_ts);
602 
603 static ssize_t syncfreq_show(struct device *dev,
604 			     struct device_attribute *attr,
605 			     char *buf)
606 {
607 	unsigned long val;
608 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
609 	struct etmv4_config *config = &drvdata->config;
610 
611 	val = config->syncfreq;
612 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
613 }
614 
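/*
 * The trace synchronization period is 2^N bytes, where N is the value
 * written here (the reset default of 0x8 requests 256 bytes).  The value
 * cannot be changed when the implementation has a fixed synchronization
 * period (syncpr).
 */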
615 static ssize_t syncfreq_store(struct device *dev,
616 			      struct device_attribute *attr,
617 			      const char *buf, size_t size)
618 {
619 	unsigned long val;
620 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
621 	struct etmv4_config *config = &drvdata->config;
622 
623 	if (kstrtoul(buf, 16, &val))
624 		return -EINVAL;
	if (drvdata->syncpr)
626 		return -EINVAL;
627 
628 	config->syncfreq = val & ETMv4_SYNC_MASK;
629 	return size;
630 }
631 static DEVICE_ATTR_RW(syncfreq);
632 
633 static ssize_t cyc_threshold_show(struct device *dev,
634 				  struct device_attribute *attr,
635 				  char *buf)
636 {
637 	unsigned long val;
638 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
639 	struct etmv4_config *config = &drvdata->config;
640 
641 	val = config->ccctlr;
642 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
643 }
644 
645 static ssize_t cyc_threshold_store(struct device *dev,
646 				   struct device_attribute *attr,
647 				   const char *buf, size_t size)
648 {
649 	unsigned long val;
650 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
651 	struct etmv4_config *config = &drvdata->config;
652 
653 	if (kstrtoul(buf, 16, &val))
654 		return -EINVAL;
655 
656 	/* mask off max threshold before checking min value */
657 	val &= ETM_CYC_THRESHOLD_MASK;
658 	if (val < drvdata->ccitmin)
659 		return -EINVAL;
660 
661 	config->ccctlr = val;
662 	return size;
663 }
664 static DEVICE_ATTR_RW(cyc_threshold);
665 
666 static ssize_t bb_ctrl_show(struct device *dev,
667 			    struct device_attribute *attr,
668 			    char *buf)
669 {
670 	unsigned long val;
671 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
672 	struct etmv4_config *config = &drvdata->config;
673 
674 	val = config->bb_ctrl;
675 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
676 }
677 
678 static ssize_t bb_ctrl_store(struct device *dev,
679 			     struct device_attribute *attr,
680 			     const char *buf, size_t size)
681 {
682 	unsigned long val;
683 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 	struct etmv4_config *config = &drvdata->config;
685 
686 	if (kstrtoul(buf, 16, &val))
687 		return -EINVAL;
	if (!drvdata->trcbb)
689 		return -EINVAL;
690 	if (!drvdata->nr_addr_cmp)
691 		return -EINVAL;
692 
693 	/*
694 	 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
695 	 * individual range comparators. If include then at least 1
696 	 * range must be selected.
697 	 */
698 	if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
699 		return -EINVAL;
700 
701 	config->bb_ctrl = val & GENMASK(8, 0);
702 	return size;
703 }
704 static DEVICE_ATTR_RW(bb_ctrl);
705 
706 static ssize_t event_vinst_show(struct device *dev,
707 				struct device_attribute *attr,
708 				char *buf)
709 {
710 	unsigned long val;
711 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
712 	struct etmv4_config *config = &drvdata->config;
713 
714 	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
715 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
716 }
717 
718 static ssize_t event_vinst_store(struct device *dev,
719 				 struct device_attribute *attr,
720 				 const char *buf, size_t size)
721 {
722 	unsigned long val;
723 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
724 	struct etmv4_config *config = &drvdata->config;
725 
726 	if (kstrtoul(buf, 16, &val))
727 		return -EINVAL;
728 
729 	spin_lock(&drvdata->spinlock);
730 	val &= ETMv4_EVENT_MASK;
731 	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
732 	config->vinst_ctrl |= val;
733 	spin_unlock(&drvdata->spinlock);
734 	return size;
735 }
736 static DEVICE_ATTR_RW(event_vinst);
737 
738 static ssize_t s_exlevel_vinst_show(struct device *dev,
739 				    struct device_attribute *attr,
740 				    char *buf)
741 {
742 	unsigned long val;
743 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
744 	struct etmv4_config *config = &drvdata->config;
745 
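	/* EXLEVEL_S, bits[19:16] */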
746 	val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
747 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
748 }
749 
750 static ssize_t s_exlevel_vinst_store(struct device *dev,
751 				     struct device_attribute *attr,
752 				     const char *buf, size_t size)
753 {
754 	unsigned long val;
755 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
756 	struct etmv4_config *config = &drvdata->config;
757 
758 	if (kstrtoul(buf, 16, &val))
759 		return -EINVAL;
760 
761 	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits */
763 	config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
764 	/* enable instruction tracing for corresponding exception level */
765 	val &= drvdata->s_ex_level;
766 	config->vinst_ctrl |= (val << 16);
767 	spin_unlock(&drvdata->spinlock);
768 	return size;
769 }
770 static DEVICE_ATTR_RW(s_exlevel_vinst);
771 
772 static ssize_t ns_exlevel_vinst_show(struct device *dev,
773 				     struct device_attribute *attr,
774 				     char *buf)
775 {
776 	unsigned long val;
777 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
778 	struct etmv4_config *config = &drvdata->config;
779 
780 	/* EXLEVEL_NS, bits[23:20] */
781 	val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
782 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
783 }
784 
785 static ssize_t ns_exlevel_vinst_store(struct device *dev,
786 				      struct device_attribute *attr,
787 				      const char *buf, size_t size)
788 {
789 	unsigned long val;
790 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
791 	struct etmv4_config *config = &drvdata->config;
792 
793 	if (kstrtoul(buf, 16, &val))
794 		return -EINVAL;
795 
796 	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits */
798 	config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
799 	/* enable instruction tracing for corresponding exception level */
800 	val &= drvdata->ns_ex_level;
801 	config->vinst_ctrl |= (val << 20);
802 	spin_unlock(&drvdata->spinlock);
803 	return size;
804 }
805 static DEVICE_ATTR_RW(ns_exlevel_vinst);
806 
807 static ssize_t addr_idx_show(struct device *dev,
808 			     struct device_attribute *attr,
809 			     char *buf)
810 {
811 	unsigned long val;
812 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
813 	struct etmv4_config *config = &drvdata->config;
814 
815 	val = config->addr_idx;
816 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
817 }
818 
819 static ssize_t addr_idx_store(struct device *dev,
820 			      struct device_attribute *attr,
821 			      const char *buf, size_t size)
822 {
823 	unsigned long val;
824 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
825 	struct etmv4_config *config = &drvdata->config;
826 
827 	if (kstrtoul(buf, 16, &val))
828 		return -EINVAL;
829 	if (val >= drvdata->nr_addr_cmp * 2)
830 		return -EINVAL;
831 
832 	/*
833 	 * Use spinlock to ensure index doesn't change while it gets
834 	 * dereferenced multiple times within a spinlock block elsewhere.
835 	 */
836 	spin_lock(&drvdata->spinlock);
837 	config->addr_idx = val;
838 	spin_unlock(&drvdata->spinlock);
839 	return size;
840 }
841 static DEVICE_ATTR_RW(addr_idx);
842 
843 static ssize_t addr_instdatatype_show(struct device *dev,
844 				      struct device_attribute *attr,
845 				      char *buf)
846 {
847 	ssize_t len;
848 	u8 val, idx;
849 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
850 	struct etmv4_config *config = &drvdata->config;
851 
852 	spin_lock(&drvdata->spinlock);
853 	idx = config->addr_idx;
854 	val = BMVAL(config->addr_acc[idx], 0, 1);
855 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
856 			val == ETM_INSTR_ADDR ? "instr" :
857 			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
858 			(val == ETM_DATA_STORE_ADDR ? "data_store" :
859 			"data_load_store")));
860 	spin_unlock(&drvdata->spinlock);
861 	return len;
862 }
863 
864 static ssize_t addr_instdatatype_store(struct device *dev,
865 				       struct device_attribute *attr,
866 				       const char *buf, size_t size)
867 {
868 	u8 idx;
869 	char str[20] = "";
870 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
871 	struct etmv4_config *config = &drvdata->config;
872 
873 	if (strlen(buf) >= 20)
874 		return -EINVAL;
875 	if (sscanf(buf, "%s", str) != 1)
876 		return -EINVAL;
877 
878 	spin_lock(&drvdata->spinlock);
879 	idx = config->addr_idx;
880 	if (!strcmp(str, "instr"))
881 		/* TYPE, bits[1:0] */
882 		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
883 
884 	spin_unlock(&drvdata->spinlock);
885 	return size;
886 }
887 static DEVICE_ATTR_RW(addr_instdatatype);
888 
889 static ssize_t addr_single_show(struct device *dev,
890 				struct device_attribute *attr,
891 				char *buf)
892 {
893 	u8 idx;
894 	unsigned long val;
895 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
896 	struct etmv4_config *config = &drvdata->config;
897 
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
900 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
901 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
902 		spin_unlock(&drvdata->spinlock);
903 		return -EPERM;
904 	}
905 	val = (unsigned long)config->addr_val[idx];
906 	spin_unlock(&drvdata->spinlock);
907 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
908 }
909 
910 static ssize_t addr_single_store(struct device *dev,
911 				 struct device_attribute *attr,
912 				 const char *buf, size_t size)
913 {
914 	u8 idx;
915 	unsigned long val;
916 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
917 	struct etmv4_config *config = &drvdata->config;
918 
919 	if (kstrtoul(buf, 16, &val))
920 		return -EINVAL;
921 
922 	spin_lock(&drvdata->spinlock);
923 	idx = config->addr_idx;
924 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
925 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
926 		spin_unlock(&drvdata->spinlock);
927 		return -EPERM;
928 	}
929 
930 	config->addr_val[idx] = (u64)val;
931 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
932 	spin_unlock(&drvdata->spinlock);
933 	return size;
934 }
935 static DEVICE_ATTR_RW(addr_single);
936 
937 static ssize_t addr_range_show(struct device *dev,
938 			       struct device_attribute *attr,
939 			       char *buf)
940 {
941 	u8 idx;
942 	unsigned long val1, val2;
943 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
944 	struct etmv4_config *config = &drvdata->config;
945 
946 	spin_lock(&drvdata->spinlock);
947 	idx = config->addr_idx;
948 	if (idx % 2 != 0) {
949 		spin_unlock(&drvdata->spinlock);
950 		return -EPERM;
951 	}
952 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
953 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
954 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
955 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
956 		spin_unlock(&drvdata->spinlock);
957 		return -EPERM;
958 	}
959 
960 	val1 = (unsigned long)config->addr_val[idx];
961 	val2 = (unsigned long)config->addr_val[idx + 1];
962 	spin_unlock(&drvdata->spinlock);
963 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
964 }
965 
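/*
 * Expected input: "<start> <end> [exclude]" in hex.  This programs the
 * currently selected comparator pair as an address range; when the
 * optional exclude field is omitted, the include/exclude choice is taken
 * from the 'mode' attribute.
 */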
966 static ssize_t addr_range_store(struct device *dev,
967 				struct device_attribute *attr,
968 				const char *buf, size_t size)
969 {
970 	u8 idx;
971 	unsigned long val1, val2;
972 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
973 	struct etmv4_config *config = &drvdata->config;
974 	int elements, exclude;
975 
976 	elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
977 
	/* exclude is optional, but we need at least two parameters */
979 	if (elements < 2)
980 		return -EINVAL;
981 	/* lower address comparator cannot have a higher address value */
982 	if (val1 > val2)
983 		return -EINVAL;
984 
985 	spin_lock(&drvdata->spinlock);
986 	idx = config->addr_idx;
987 	if (idx % 2 != 0) {
988 		spin_unlock(&drvdata->spinlock);
989 		return -EPERM;
990 	}
991 
992 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
993 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
994 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
995 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
996 		spin_unlock(&drvdata->spinlock);
997 		return -EPERM;
998 	}
999 
1000 	config->addr_val[idx] = (u64)val1;
1001 	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1002 	config->addr_val[idx + 1] = (u64)val2;
1003 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1004 	/*
1005 	 * Program include or exclude control bits for vinst or vdata
1006 	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1007 	 * use supplied value, or default to bit set in 'mode'
1008 	 */
1009 	if (elements != 3)
1010 		exclude = config->mode & ETM_MODE_EXCLUDE;
1011 	etm4_set_mode_exclude(drvdata, exclude ? true : false);
1012 
1013 	spin_unlock(&drvdata->spinlock);
1014 	return size;
1015 }
1016 static DEVICE_ATTR_RW(addr_range);
1017 
1018 static ssize_t addr_start_show(struct device *dev,
1019 			       struct device_attribute *attr,
1020 			       char *buf)
1021 {
1022 	u8 idx;
1023 	unsigned long val;
1024 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1025 	struct etmv4_config *config = &drvdata->config;
1026 
1027 	spin_lock(&drvdata->spinlock);
1028 	idx = config->addr_idx;
1029 
1030 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1031 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1032 		spin_unlock(&drvdata->spinlock);
1033 		return -EPERM;
1034 	}
1035 
1036 	val = (unsigned long)config->addr_val[idx];
1037 	spin_unlock(&drvdata->spinlock);
1038 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1039 }
1040 
1041 static ssize_t addr_start_store(struct device *dev,
1042 				struct device_attribute *attr,
1043 				const char *buf, size_t size)
1044 {
1045 	u8 idx;
1046 	unsigned long val;
1047 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1048 	struct etmv4_config *config = &drvdata->config;
1049 
1050 	if (kstrtoul(buf, 16, &val))
1051 		return -EINVAL;
1052 
1053 	spin_lock(&drvdata->spinlock);
1054 	idx = config->addr_idx;
1055 	if (!drvdata->nr_addr_cmp) {
1056 		spin_unlock(&drvdata->spinlock);
1057 		return -EINVAL;
1058 	}
1059 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1060 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1061 		spin_unlock(&drvdata->spinlock);
1062 		return -EPERM;
1063 	}
1064 
1065 	config->addr_val[idx] = (u64)val;
1066 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1067 	config->vissctlr |= BIT(idx);
1068 	spin_unlock(&drvdata->spinlock);
1069 	return size;
1070 }
1071 static DEVICE_ATTR_RW(addr_start);
1072 
1073 static ssize_t addr_stop_show(struct device *dev,
1074 			      struct device_attribute *attr,
1075 			      char *buf)
1076 {
1077 	u8 idx;
1078 	unsigned long val;
1079 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1080 	struct etmv4_config *config = &drvdata->config;
1081 
1082 	spin_lock(&drvdata->spinlock);
1083 	idx = config->addr_idx;
1084 
1085 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1086 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1087 		spin_unlock(&drvdata->spinlock);
1088 		return -EPERM;
1089 	}
1090 
1091 	val = (unsigned long)config->addr_val[idx];
1092 	spin_unlock(&drvdata->spinlock);
1093 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1094 }
1095 
1096 static ssize_t addr_stop_store(struct device *dev,
1097 			       struct device_attribute *attr,
1098 			       const char *buf, size_t size)
1099 {
1100 	u8 idx;
1101 	unsigned long val;
1102 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1103 	struct etmv4_config *config = &drvdata->config;
1104 
1105 	if (kstrtoul(buf, 16, &val))
1106 		return -EINVAL;
1107 
1108 	spin_lock(&drvdata->spinlock);
1109 	idx = config->addr_idx;
1110 	if (!drvdata->nr_addr_cmp) {
1111 		spin_unlock(&drvdata->spinlock);
1112 		return -EINVAL;
1113 	}
1114 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1116 		spin_unlock(&drvdata->spinlock);
1117 		return -EPERM;
1118 	}
1119 
1120 	config->addr_val[idx] = (u64)val;
1121 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1122 	config->vissctlr |= BIT(idx + 16);
1123 	spin_unlock(&drvdata->spinlock);
1124 	return size;
1125 }
1126 static DEVICE_ATTR_RW(addr_stop);
1127 
1128 static ssize_t addr_ctxtype_show(struct device *dev,
1129 				 struct device_attribute *attr,
1130 				 char *buf)
1131 {
1132 	ssize_t len;
1133 	u8 idx, val;
1134 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1135 	struct etmv4_config *config = &drvdata->config;
1136 
1137 	spin_lock(&drvdata->spinlock);
1138 	idx = config->addr_idx;
1139 	/* CONTEXTTYPE, bits[3:2] */
1140 	val = BMVAL(config->addr_acc[idx], 2, 3);
1141 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1142 			(val == ETM_CTX_CTXID ? "ctxid" :
1143 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1144 	spin_unlock(&drvdata->spinlock);
1145 	return len;
1146 }
1147 
1148 static ssize_t addr_ctxtype_store(struct device *dev,
1149 				  struct device_attribute *attr,
1150 				  const char *buf, size_t size)
1151 {
1152 	u8 idx;
1153 	char str[10] = "";
1154 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1155 	struct etmv4_config *config = &drvdata->config;
1156 
1157 	if (strlen(buf) >= 10)
1158 		return -EINVAL;
1159 	if (sscanf(buf, "%s", str) != 1)
1160 		return -EINVAL;
1161 
1162 	spin_lock(&drvdata->spinlock);
1163 	idx = config->addr_idx;
1164 	if (!strcmp(str, "none"))
1165 		/* start by clearing context type bits */
1166 		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1167 	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID comparison */
1169 		if (drvdata->numcidc) {
1170 			config->addr_acc[idx] |= BIT(2);
1171 			config->addr_acc[idx] &= ~BIT(3);
1172 		}
1173 	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID comparison */
1175 		if (drvdata->numvmidc) {
1176 			config->addr_acc[idx] &= ~BIT(2);
1177 			config->addr_acc[idx] |= BIT(3);
1178 		}
1179 	} else if (!strcmp(str, "all")) {
1180 		/*
1181 		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID comparison
1183 		 */
1184 		if (drvdata->numcidc)
1185 			config->addr_acc[idx] |= BIT(2);
1186 		if (drvdata->numvmidc)
1187 			config->addr_acc[idx] |= BIT(3);
1188 	}
1189 	spin_unlock(&drvdata->spinlock);
1190 	return size;
1191 }
1192 static DEVICE_ATTR_RW(addr_ctxtype);
1193 
1194 static ssize_t addr_context_show(struct device *dev,
1195 				 struct device_attribute *attr,
1196 				 char *buf)
1197 {
1198 	u8 idx;
1199 	unsigned long val;
1200 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1201 	struct etmv4_config *config = &drvdata->config;
1202 
1203 	spin_lock(&drvdata->spinlock);
1204 	idx = config->addr_idx;
1205 	/* context ID comparator bits[6:4] */
1206 	val = BMVAL(config->addr_acc[idx], 4, 6);
1207 	spin_unlock(&drvdata->spinlock);
1208 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1209 }
1210 
1211 static ssize_t addr_context_store(struct device *dev,
1212 				  struct device_attribute *attr,
1213 				  const char *buf, size_t size)
1214 {
1215 	u8 idx;
1216 	unsigned long val;
1217 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1218 	struct etmv4_config *config = &drvdata->config;
1219 
1220 	if (kstrtoul(buf, 16, &val))
1221 		return -EINVAL;
1222 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1223 		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1225 		     drvdata->numcidc : drvdata->numvmidc))
1226 		return -EINVAL;
1227 
1228 	spin_lock(&drvdata->spinlock);
1229 	idx = config->addr_idx;
1230 	/* clear context ID comparator bits[6:4] */
1231 	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1232 	config->addr_acc[idx] |= (val << 4);
1233 	spin_unlock(&drvdata->spinlock);
1234 	return size;
1235 }
1236 static DEVICE_ATTR_RW(addr_context);
1237 
1238 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1239 				      struct device_attribute *attr,
1240 				      char *buf)
1241 {
1242 	u8 idx;
1243 	unsigned long val;
1244 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1245 	struct etmv4_config *config = &drvdata->config;
1246 
1247 	spin_lock(&drvdata->spinlock);
1248 	idx = config->addr_idx;
1249 	val = BMVAL(config->addr_acc[idx], 8, 14);
1250 	spin_unlock(&drvdata->spinlock);
1251 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1252 }
1253 
1254 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1255 				       struct device_attribute *attr,
1256 				       const char *buf, size_t size)
1257 {
1258 	u8 idx;
1259 	unsigned long val;
1260 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1261 	struct etmv4_config *config = &drvdata->config;
1262 
1263 	if (kstrtoul(buf, 0, &val))
1264 		return -EINVAL;
1265 
1266 	if (val & ~((GENMASK(14, 8) >> 8)))
1267 		return -EINVAL;
1268 
1269 	spin_lock(&drvdata->spinlock);
1270 	idx = config->addr_idx;
1271 	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1272 	config->addr_acc[idx] &= ~(GENMASK(14, 8));
1273 	config->addr_acc[idx] |= (val << 8);
1274 	spin_unlock(&drvdata->spinlock);
1275 	return size;
1276 }
1277 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
1278 
1279 static const char * const addr_type_names[] = {
1280 	"unused",
1281 	"single",
1282 	"range",
1283 	"start",
1284 	"stop"
1285 };
1286 
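/*
 * Read-only summary of the currently selected address comparator: its
 * type, programmed address(es), include/exclude setting for range pairs
 * and the raw access control value.
 */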
1287 static ssize_t addr_cmp_view_show(struct device *dev,
1288 				  struct device_attribute *attr, char *buf)
1289 {
1290 	u8 idx, addr_type;
1291 	unsigned long addr_v, addr_v2, addr_ctrl;
1292 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1293 	struct etmv4_config *config = &drvdata->config;
1294 	int size = 0;
1295 	bool exclude = false;
1296 
1297 	spin_lock(&drvdata->spinlock);
1298 	idx = config->addr_idx;
1299 	addr_v = config->addr_val[idx];
1300 	addr_ctrl = config->addr_acc[idx];
1301 	addr_type = config->addr_type[idx];
1302 	if (addr_type == ETM_ADDR_TYPE_RANGE) {
1303 		if (idx & 0x1) {
1304 			idx -= 1;
1305 			addr_v2 = addr_v;
1306 			addr_v = config->addr_val[idx];
1307 		} else {
1308 			addr_v2 = config->addr_val[idx + 1];
1309 		}
1310 		exclude = config->viiectlr & BIT(idx / 2 + 16);
1311 	}
1312 	spin_unlock(&drvdata->spinlock);
1313 	if (addr_type) {
1314 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1315 				 addr_type_names[addr_type], addr_v);
1316 		if (addr_type == ETM_ADDR_TYPE_RANGE) {
1317 			size += scnprintf(buf + size, PAGE_SIZE - size,
1318 					  " %#lx %s", addr_v2,
1319 					  exclude ? "exclude" : "include");
1320 		}
1321 		size += scnprintf(buf + size, PAGE_SIZE - size,
1322 				  " ctrl(%#lx)\n", addr_ctrl);
1323 	} else {
1324 		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1325 	}
1326 	return size;
1327 }
1328 static DEVICE_ATTR_RO(addr_cmp_view);
1329 
1330 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1331 					    struct device_attribute *attr,
1332 					    char *buf)
1333 {
1334 	unsigned long val;
1335 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1336 	struct etmv4_config *config = &drvdata->config;
1337 
1338 	if (!drvdata->nr_pe_cmp)
1339 		return -EINVAL;
1340 	val = config->vipcssctlr;
1341 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1342 }
1343 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1344 					     struct device_attribute *attr,
1345 					     const char *buf, size_t size)
1346 {
1347 	unsigned long val;
1348 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1349 	struct etmv4_config *config = &drvdata->config;
1350 
1351 	if (kstrtoul(buf, 16, &val))
1352 		return -EINVAL;
1353 	if (!drvdata->nr_pe_cmp)
1354 		return -EINVAL;
1355 
1356 	spin_lock(&drvdata->spinlock);
1357 	config->vipcssctlr = val;
1358 	spin_unlock(&drvdata->spinlock);
1359 	return size;
1360 }
1361 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
1362 
1363 static ssize_t seq_idx_show(struct device *dev,
1364 			    struct device_attribute *attr,
1365 			    char *buf)
1366 {
1367 	unsigned long val;
1368 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1369 	struct etmv4_config *config = &drvdata->config;
1370 
1371 	val = config->seq_idx;
1372 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1373 }
1374 
1375 static ssize_t seq_idx_store(struct device *dev,
1376 			     struct device_attribute *attr,
1377 			     const char *buf, size_t size)
1378 {
1379 	unsigned long val;
1380 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1381 	struct etmv4_config *config = &drvdata->config;
1382 
1383 	if (kstrtoul(buf, 16, &val))
1384 		return -EINVAL;
1385 	if (val >= drvdata->nrseqstate - 1)
1386 		return -EINVAL;
1387 
1388 	/*
1389 	 * Use spinlock to ensure index doesn't change while it gets
1390 	 * dereferenced multiple times within a spinlock block elsewhere.
1391 	 */
1392 	spin_lock(&drvdata->spinlock);
1393 	config->seq_idx = val;
1394 	spin_unlock(&drvdata->spinlock);
1395 	return size;
1396 }
1397 static DEVICE_ATTR_RW(seq_idx);
1398 
1399 static ssize_t seq_state_show(struct device *dev,
1400 			      struct device_attribute *attr,
1401 			      char *buf)
1402 {
1403 	unsigned long val;
1404 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1405 	struct etmv4_config *config = &drvdata->config;
1406 
1407 	val = config->seq_state;
1408 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1409 }
1410 
1411 static ssize_t seq_state_store(struct device *dev,
1412 			       struct device_attribute *attr,
1413 			       const char *buf, size_t size)
1414 {
1415 	unsigned long val;
1416 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1417 	struct etmv4_config *config = &drvdata->config;
1418 
1419 	if (kstrtoul(buf, 16, &val))
1420 		return -EINVAL;
1421 	if (val >= drvdata->nrseqstate)
1422 		return -EINVAL;
1423 
1424 	config->seq_state = val;
1425 	return size;
1426 }
1427 static DEVICE_ATTR_RW(seq_state);
1428 
1429 static ssize_t seq_event_show(struct device *dev,
1430 			      struct device_attribute *attr,
1431 			      char *buf)
1432 {
1433 	u8 idx;
1434 	unsigned long val;
1435 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1436 	struct etmv4_config *config = &drvdata->config;
1437 
1438 	spin_lock(&drvdata->spinlock);
1439 	idx = config->seq_idx;
1440 	val = config->seq_ctrl[idx];
1441 	spin_unlock(&drvdata->spinlock);
1442 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1443 }
1444 
1445 static ssize_t seq_event_store(struct device *dev,
1446 			       struct device_attribute *attr,
1447 			       const char *buf, size_t size)
1448 {
1449 	u8 idx;
1450 	unsigned long val;
1451 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1452 	struct etmv4_config *config = &drvdata->config;
1453 
1454 	if (kstrtoul(buf, 16, &val))
1455 		return -EINVAL;
1456 
1457 	spin_lock(&drvdata->spinlock);
1458 	idx = config->seq_idx;
1459 	/* Seq control has two masks B[15:8] F[7:0] */
1460 	config->seq_ctrl[idx] = val & 0xFFFF;
1461 	spin_unlock(&drvdata->spinlock);
1462 	return size;
1463 }
1464 static DEVICE_ATTR_RW(seq_event);
1465 
1466 static ssize_t seq_reset_event_show(struct device *dev,
1467 				    struct device_attribute *attr,
1468 				    char *buf)
1469 {
1470 	unsigned long val;
1471 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1472 	struct etmv4_config *config = &drvdata->config;
1473 
1474 	val = config->seq_rst;
1475 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1476 }
1477 
1478 static ssize_t seq_reset_event_store(struct device *dev,
1479 				     struct device_attribute *attr,
1480 				     const char *buf, size_t size)
1481 {
1482 	unsigned long val;
1483 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1484 	struct etmv4_config *config = &drvdata->config;
1485 
1486 	if (kstrtoul(buf, 16, &val))
1487 		return -EINVAL;
	if (!drvdata->nrseqstate)
1489 		return -EINVAL;
1490 
1491 	config->seq_rst = val & ETMv4_EVENT_MASK;
1492 	return size;
1493 }
1494 static DEVICE_ATTR_RW(seq_reset_event);
1495 
1496 static ssize_t cntr_idx_show(struct device *dev,
1497 			     struct device_attribute *attr,
1498 			     char *buf)
1499 {
1500 	unsigned long val;
1501 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1502 	struct etmv4_config *config = &drvdata->config;
1503 
1504 	val = config->cntr_idx;
1505 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1506 }
1507 
1508 static ssize_t cntr_idx_store(struct device *dev,
1509 			      struct device_attribute *attr,
1510 			      const char *buf, size_t size)
1511 {
1512 	unsigned long val;
1513 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1514 	struct etmv4_config *config = &drvdata->config;
1515 
1516 	if (kstrtoul(buf, 16, &val))
1517 		return -EINVAL;
1518 	if (val >= drvdata->nr_cntr)
1519 		return -EINVAL;
1520 
1521 	/*
1522 	 * Use spinlock to ensure index doesn't change while it gets
1523 	 * dereferenced multiple times within a spinlock block elsewhere.
1524 	 */
1525 	spin_lock(&drvdata->spinlock);
1526 	config->cntr_idx = val;
1527 	spin_unlock(&drvdata->spinlock);
1528 	return size;
1529 }
1530 static DEVICE_ATTR_RW(cntr_idx);
1531 
1532 static ssize_t cntrldvr_show(struct device *dev,
1533 			     struct device_attribute *attr,
1534 			     char *buf)
1535 {
1536 	u8 idx;
1537 	unsigned long val;
1538 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1539 	struct etmv4_config *config = &drvdata->config;
1540 
1541 	spin_lock(&drvdata->spinlock);
1542 	idx = config->cntr_idx;
1543 	val = config->cntrldvr[idx];
1544 	spin_unlock(&drvdata->spinlock);
1545 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1546 }
1547 
1548 static ssize_t cntrldvr_store(struct device *dev,
1549 			      struct device_attribute *attr,
1550 			      const char *buf, size_t size)
1551 {
1552 	u8 idx;
1553 	unsigned long val;
1554 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1555 	struct etmv4_config *config = &drvdata->config;
1556 
1557 	if (kstrtoul(buf, 16, &val))
1558 		return -EINVAL;
1559 	if (val > ETM_CNTR_MAX_VAL)
1560 		return -EINVAL;
1561 
1562 	spin_lock(&drvdata->spinlock);
1563 	idx = config->cntr_idx;
1564 	config->cntrldvr[idx] = val;
1565 	spin_unlock(&drvdata->spinlock);
1566 	return size;
1567 }
1568 static DEVICE_ATTR_RW(cntrldvr);
1569 
1570 static ssize_t cntr_val_show(struct device *dev,
1571 			     struct device_attribute *attr,
1572 			     char *buf)
1573 {
1574 	u8 idx;
1575 	unsigned long val;
1576 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1577 	struct etmv4_config *config = &drvdata->config;
1578 
1579 	spin_lock(&drvdata->spinlock);
1580 	idx = config->cntr_idx;
1581 	val = config->cntr_val[idx];
1582 	spin_unlock(&drvdata->spinlock);
1583 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1584 }
1585 
1586 static ssize_t cntr_val_store(struct device *dev,
1587 			      struct device_attribute *attr,
1588 			      const char *buf, size_t size)
1589 {
1590 	u8 idx;
1591 	unsigned long val;
1592 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1593 	struct etmv4_config *config = &drvdata->config;
1594 
1595 	if (kstrtoul(buf, 16, &val))
1596 		return -EINVAL;
1597 	if (val > ETM_CNTR_MAX_VAL)
1598 		return -EINVAL;
1599 
1600 	spin_lock(&drvdata->spinlock);
1601 	idx = config->cntr_idx;
1602 	config->cntr_val[idx] = val;
1603 	spin_unlock(&drvdata->spinlock);
1604 	return size;
1605 }
1606 static DEVICE_ATTR_RW(cntr_val);
1607 
1608 static ssize_t cntr_ctrl_show(struct device *dev,
1609 			      struct device_attribute *attr,
1610 			      char *buf)
1611 {
1612 	u8 idx;
1613 	unsigned long val;
1614 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1615 	struct etmv4_config *config = &drvdata->config;
1616 
1617 	spin_lock(&drvdata->spinlock);
1618 	idx = config->cntr_idx;
1619 	val = config->cntr_ctrl[idx];
1620 	spin_unlock(&drvdata->spinlock);
1621 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1622 }
1623 
1624 static ssize_t cntr_ctrl_store(struct device *dev,
1625 			       struct device_attribute *attr,
1626 			       const char *buf, size_t size)
1627 {
1628 	u8 idx;
1629 	unsigned long val;
1630 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1631 	struct etmv4_config *config = &drvdata->config;
1632 
1633 	if (kstrtoul(buf, 16, &val))
1634 		return -EINVAL;
1635 
1636 	spin_lock(&drvdata->spinlock);
1637 	idx = config->cntr_idx;
1638 	config->cntr_ctrl[idx] = val;
1639 	spin_unlock(&drvdata->spinlock);
1640 	return size;
1641 }
1642 static DEVICE_ATTR_RW(cntr_ctrl);
1643 
1644 static ssize_t res_idx_show(struct device *dev,
1645 			    struct device_attribute *attr,
1646 			    char *buf)
1647 {
1648 	unsigned long val;
1649 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1650 	struct etmv4_config *config = &drvdata->config;
1651 
1652 	val = config->res_idx;
1653 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1654 }
1655 
1656 static ssize_t res_idx_store(struct device *dev,
1657 			     struct device_attribute *attr,
1658 			     const char *buf, size_t size)
1659 {
1660 	unsigned long val;
1661 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1662 	struct etmv4_config *config = &drvdata->config;
1663 
1664 	if (kstrtoul(buf, 16, &val))
1665 		return -EINVAL;
1666 	/* Resource selector pair 0 is always implemented and reserved */
1667 	if ((val == 0) || (val >= drvdata->nr_resource))
1668 		return -EINVAL;
1669 
1670 	/*
1671 	 * Use spinlock to ensure index doesn't change while it gets
1672 	 * dereferenced multiple times within a spinlock block elsewhere.
1673 	 */
1674 	spin_lock(&drvdata->spinlock);
1675 	config->res_idx = val;
1676 	spin_unlock(&drvdata->spinlock);
1677 	return size;
1678 }
1679 static DEVICE_ATTR_RW(res_idx);
1680 
1681 static ssize_t res_ctrl_show(struct device *dev,
1682 			     struct device_attribute *attr,
1683 			     char *buf)
1684 {
1685 	u8 idx;
1686 	unsigned long val;
1687 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1688 	struct etmv4_config *config = &drvdata->config;
1689 
1690 	spin_lock(&drvdata->spinlock);
1691 	idx = config->res_idx;
1692 	val = config->res_ctrl[idx];
1693 	spin_unlock(&drvdata->spinlock);
1694 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1695 }
1696 
1697 static ssize_t res_ctrl_store(struct device *dev,
1698 			      struct device_attribute *attr,
1699 			      const char *buf, size_t size)
1700 {
1701 	u8 idx;
1702 	unsigned long val;
1703 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1704 	struct etmv4_config *config = &drvdata->config;
1705 
1706 	if (kstrtoul(buf, 16, &val))
1707 		return -EINVAL;
1708 
1709 	spin_lock(&drvdata->spinlock);
1710 	idx = config->res_idx;
	/* For an odd idx, the pair inversion bit is RES0 */
1712 	if (idx % 2 != 0)
1713 		/* PAIRINV, bit[21] */
1714 		val &= ~BIT(21);
1715 	config->res_ctrl[idx] = val & GENMASK(21, 0);
1716 	spin_unlock(&drvdata->spinlock);
1717 	return size;
1718 }
1719 static DEVICE_ATTR_RW(res_ctrl);
1720 
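/*
 * Single-shot comparator controls.  Whenever the control or PE comparator
 * inputs are reprogrammed, bit 31 of the corresponding status register is
 * cleared, as required when programming the comparator.
 */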
1721 static ssize_t sshot_idx_show(struct device *dev,
1722 			      struct device_attribute *attr, char *buf)
1723 {
1724 	unsigned long val;
1725 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1726 	struct etmv4_config *config = &drvdata->config;
1727 
1728 	val = config->ss_idx;
1729 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1730 }
1731 
1732 static ssize_t sshot_idx_store(struct device *dev,
1733 			       struct device_attribute *attr,
1734 			       const char *buf, size_t size)
1735 {
1736 	unsigned long val;
1737 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1738 	struct etmv4_config *config = &drvdata->config;
1739 
1740 	if (kstrtoul(buf, 16, &val))
1741 		return -EINVAL;
1742 	if (val >= drvdata->nr_ss_cmp)
1743 		return -EINVAL;
1744 
1745 	spin_lock(&drvdata->spinlock);
1746 	config->ss_idx = val;
1747 	spin_unlock(&drvdata->spinlock);
1748 	return size;
1749 }
1750 static DEVICE_ATTR_RW(sshot_idx);
1751 
1752 static ssize_t sshot_ctrl_show(struct device *dev,
1753 			       struct device_attribute *attr,
1754 			       char *buf)
1755 {
1756 	unsigned long val;
1757 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1758 	struct etmv4_config *config = &drvdata->config;
1759 
1760 	spin_lock(&drvdata->spinlock);
1761 	val = config->ss_ctrl[config->ss_idx];
1762 	spin_unlock(&drvdata->spinlock);
1763 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1764 }
1765 
1766 static ssize_t sshot_ctrl_store(struct device *dev,
1767 				struct device_attribute *attr,
1768 				const char *buf, size_t size)
1769 {
1770 	u8 idx;
1771 	unsigned long val;
1772 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1773 	struct etmv4_config *config = &drvdata->config;
1774 
1775 	if (kstrtoul(buf, 16, &val))
1776 		return -EINVAL;
1777 
1778 	spin_lock(&drvdata->spinlock);
1779 	idx = config->ss_idx;
1780 	config->ss_ctrl[idx] = val & GENMASK(24, 0);
1781 	/* must clear bit 31 in related status register on programming */
1782 	config->ss_status[idx] &= ~BIT(31);
1783 	spin_unlock(&drvdata->spinlock);
1784 	return size;
1785 }
1786 static DEVICE_ATTR_RW(sshot_ctrl);
1787 
1788 static ssize_t sshot_status_show(struct device *dev,
1789 				 struct device_attribute *attr, char *buf)
1790 {
1791 	unsigned long val;
1792 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1793 	struct etmv4_config *config = &drvdata->config;
1794 
1795 	spin_lock(&drvdata->spinlock);
1796 	val = config->ss_status[config->ss_idx];
1797 	spin_unlock(&drvdata->spinlock);
1798 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1799 }
1800 static DEVICE_ATTR_RO(sshot_status);
1801 
1802 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1803 				  struct device_attribute *attr,
1804 				  char *buf)
1805 {
1806 	unsigned long val;
1807 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1808 	struct etmv4_config *config = &drvdata->config;
1809 
1810 	spin_lock(&drvdata->spinlock);
1811 	val = config->ss_pe_cmp[config->ss_idx];
1812 	spin_unlock(&drvdata->spinlock);
1813 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1814 }
1815 
1816 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1817 				   struct device_attribute *attr,
1818 				   const char *buf, size_t size)
1819 {
1820 	u8 idx;
1821 	unsigned long val;
1822 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1823 	struct etmv4_config *config = &drvdata->config;
1824 
1825 	if (kstrtoul(buf, 16, &val))
1826 		return -EINVAL;
1827 
1828 	spin_lock(&drvdata->spinlock);
1829 	idx = config->ss_idx;
1830 	config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
1831 	/* must clear bit 31 in related status register on programming */
1832 	config->ss_status[idx] &= ~BIT(31);
1833 	spin_unlock(&drvdata->spinlock);
1834 	return size;
1835 }
1836 static DEVICE_ATTR_RW(sshot_pe_ctrl);
1837 
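/*
 * ctxid_idx selects which of the numcidc context ID comparators the
 * ctxid_pid file operates on.
 */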
1838 static ssize_t ctxid_idx_show(struct device *dev,
1839 			      struct device_attribute *attr,
1840 			      char *buf)
1841 {
1842 	unsigned long val;
1843 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1844 	struct etmv4_config *config = &drvdata->config;
1845 
1846 	val = config->ctxid_idx;
1847 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1848 }
1849 
1850 static ssize_t ctxid_idx_store(struct device *dev,
1851 			       struct device_attribute *attr,
1852 			       const char *buf, size_t size)
1853 {
1854 	unsigned long val;
1855 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1856 	struct etmv4_config *config = &drvdata->config;
1857 
1858 	if (kstrtoul(buf, 16, &val))
1859 		return -EINVAL;
1860 	if (val >= drvdata->numcidc)
1861 		return -EINVAL;
1862 
1863 	/*
	 * Take the spinlock so the index cannot change while other handlers
	 * dereference it several times inside their own locked regions.
1866 	 */
1867 	spin_lock(&drvdata->spinlock);
1868 	config->ctxid_idx = val;
1869 	spin_unlock(&drvdata->spinlock);
1870 	return size;
1871 }
1872 static DEVICE_ATTR_RW(ctxid_idx);
1873 
1874 static ssize_t ctxid_pid_show(struct device *dev,
1875 			      struct device_attribute *attr,
1876 			      char *buf)
1877 {
1878 	u8 idx;
1879 	unsigned long val;
1880 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1881 	struct etmv4_config *config = &drvdata->config;
1882 
1883 	/*
	 * Don't use contextID tracing if the caller is not in the initial
	 * PID namespace.  See comment in ctxid_pid_store().
1886 	 */
1887 	if (task_active_pid_ns(current) != &init_pid_ns)
1888 		return -EINVAL;
1889 
1890 	spin_lock(&drvdata->spinlock);
1891 	idx = config->ctxid_idx;
1892 	val = (unsigned long)config->ctxid_pid[idx];
1893 	spin_unlock(&drvdata->spinlock);
1894 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1895 }
1896 
1897 static ssize_t ctxid_pid_store(struct device *dev,
1898 			       struct device_attribute *attr,
1899 			       const char *buf, size_t size)
1900 {
1901 	u8 idx;
1902 	unsigned long pid;
1903 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1904 	struct etmv4_config *config = &drvdata->config;
1905 
1906 	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream.  But if
	 * a process is in a PID namespace, the PID that process sees is not
	 * the PID the kernel sees, which makes the feature confusing and can
	 * potentially leak kernel-only information.  As such, refuse to use
	 * the feature if @current is not in the initial PID namespace.
1914 	 */
1915 	if (task_active_pid_ns(current) != &init_pid_ns)
1916 		return -EINVAL;
1917 
1918 	/*
	 * Only implemented when ctxid tracing is supported, i.e. at least one
	 * ctxid comparator is implemented and the context ID size is greater
	 * than zero bits.
1922 	 */
1923 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1924 		return -EINVAL;
1925 	if (kstrtoul(buf, 16, &pid))
1926 		return -EINVAL;
1927 
1928 	spin_lock(&drvdata->spinlock);
1929 	idx = config->ctxid_idx;
1930 	config->ctxid_pid[idx] = (u64)pid;
1931 	spin_unlock(&drvdata->spinlock);
1932 	return size;
1933 }
1934 static DEVICE_ATTR_RW(ctxid_pid);
1935 
1936 static ssize_t ctxid_masks_show(struct device *dev,
1937 				struct device_attribute *attr,
1938 				char *buf)
1939 {
1940 	unsigned long val1, val2;
1941 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1942 	struct etmv4_config *config = &drvdata->config;
1943 
1944 	/*
	 * Don't use contextID tracing if the caller is not in the initial
	 * PID namespace.  See comment in ctxid_pid_store().
1947 	 */
1948 	if (task_active_pid_ns(current) != &init_pid_ns)
1949 		return -EINVAL;
1950 
1951 	spin_lock(&drvdata->spinlock);
1952 	val1 = config->ctxid_mask0;
1953 	val2 = config->ctxid_mask1;
1954 	spin_unlock(&drvdata->spinlock);
1955 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1956 }
1957 
1958 static ssize_t ctxid_masks_store(struct device *dev,
1959 				struct device_attribute *attr,
1960 				const char *buf, size_t size)
1961 {
1962 	u8 i, j, maskbyte;
1963 	unsigned long val1, val2, mask;
1964 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1965 	struct etmv4_config *config = &drvdata->config;
1966 	int nr_inputs;
1967 
1968 	/*
	 * Don't use contextID tracing if the caller is not in the initial
	 * PID namespace.  See comment in ctxid_pid_store().
1971 	 */
1972 	if (task_active_pid_ns(current) != &init_pid_ns)
1973 		return -EINVAL;
1974 
1975 	/*
	 * Only implemented when ctxid tracing is supported, i.e. at least one
	 * ctxid comparator is implemented and the context ID size is greater
	 * than zero bits.
1979 	 */
1980 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1981 		return -EINVAL;
1982 	/* one mask if <= 4 comparators, two for up to 8 */
1983 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
1984 	if ((drvdata->numcidc > 4) && (nr_inputs != 2))
1985 		return -EINVAL;
1986 
1987 	spin_lock(&drvdata->spinlock);
1988 	/*
	 * Each byte [0..3] of ctxid_mask0 controls the mask applied to ctxid
	 * comparator [0..3]; bytes of ctxid_mask1 cover comparators [4..7].
1991 	 */
1992 	switch (drvdata->numcidc) {
1993 	case 0x1:
1994 		/* COMP0, bits[7:0] */
1995 		config->ctxid_mask0 = val1 & 0xFF;
1996 		break;
1997 	case 0x2:
1998 		/* COMP1, bits[15:8] */
1999 		config->ctxid_mask0 = val1 & 0xFFFF;
2000 		break;
2001 	case 0x3:
2002 		/* COMP2, bits[23:16] */
2003 		config->ctxid_mask0 = val1 & 0xFFFFFF;
2004 		break;
2005 	case 0x4:
		/* COMP3, bits[31:24] */
2007 		config->ctxid_mask0 = val1;
2008 		break;
2009 	case 0x5:
2010 		/* COMP4, bits[7:0] */
2011 		config->ctxid_mask0 = val1;
2012 		config->ctxid_mask1 = val2 & 0xFF;
2013 		break;
2014 	case 0x6:
2015 		/* COMP5, bits[15:8] */
2016 		config->ctxid_mask0 = val1;
2017 		config->ctxid_mask1 = val2 & 0xFFFF;
2018 		break;
2019 	case 0x7:
2020 		/* COMP6, bits[23:16] */
2021 		config->ctxid_mask0 = val1;
2022 		config->ctxid_mask1 = val2 & 0xFFFFFF;
2023 		break;
2024 	case 0x8:
2025 		/* COMP7, bits[31:24] */
2026 		config->ctxid_mask0 = val1;
2027 		config->ctxid_mask1 = val2;
2028 		break;
2029 	default:
2030 		break;
2031 	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant
	 * byte of the ctxid comparator value to 0x0, otherwise the behavior
	 * is unpredictable.  For example, if bit[3] of ctxid_mask0 is 1, we
	 * must clear bits[31:24] of the ctxid comparator0 value register
	 * (i.e. byte 3 of that value).
	 */
2038 	mask = config->ctxid_mask0;
2039 	for (i = 0; i < drvdata->numcidc; i++) {
2040 		/* mask value of corresponding ctxid comparator */
2041 		maskbyte = mask & ETMv4_EVENT_MASK;
2042 		/*
2043 		 * each bit corresponds to a byte of respective ctxid comparator
2044 		 * value register
2045 		 */
2046 		for (j = 0; j < 8; j++) {
2047 			if (maskbyte & 1)
2048 				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2049 			maskbyte >>= 1;
2050 		}
2051 		/* Select the next ctxid comparator mask value */
2052 		if (i == 3)
2053 			/* ctxid comparators[4-7] */
2054 			mask = config->ctxid_mask1;
2055 		else
2056 			mask >>= 0x8;
2057 	}
2058 
2059 	spin_unlock(&drvdata->spinlock);
2060 	return size;
2061 }
2062 static DEVICE_ATTR_RW(ctxid_masks);
2063 
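/*
 * vmid_idx selects which of the numvmidc VMID comparators the vmid_val
 * file operates on.
 */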
2064 static ssize_t vmid_idx_show(struct device *dev,
2065 			     struct device_attribute *attr,
2066 			     char *buf)
2067 {
2068 	unsigned long val;
2069 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2070 	struct etmv4_config *config = &drvdata->config;
2071 
2072 	val = config->vmid_idx;
2073 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2074 }
2075 
2076 static ssize_t vmid_idx_store(struct device *dev,
2077 			      struct device_attribute *attr,
2078 			      const char *buf, size_t size)
2079 {
2080 	unsigned long val;
2081 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2082 	struct etmv4_config *config = &drvdata->config;
2083 
2084 	if (kstrtoul(buf, 16, &val))
2085 		return -EINVAL;
2086 	if (val >= drvdata->numvmidc)
2087 		return -EINVAL;
2088 
2089 	/*
	 * Take the spinlock so the index cannot change while other handlers
	 * dereference it several times inside their own locked regions.
2092 	 */
2093 	spin_lock(&drvdata->spinlock);
2094 	config->vmid_idx = val;
2095 	spin_unlock(&drvdata->spinlock);
2096 	return size;
2097 }
2098 static DEVICE_ATTR_RW(vmid_idx);
2099 
2100 static ssize_t vmid_val_show(struct device *dev,
2101 			     struct device_attribute *attr,
2102 			     char *buf)
2103 {
2104 	unsigned long val;
2105 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2106 	struct etmv4_config *config = &drvdata->config;
2107 
	spin_lock(&drvdata->spinlock);
	val = (unsigned long)config->vmid_val[config->vmid_idx];
	spin_unlock(&drvdata->spinlock);
2109 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2110 }
2111 
2112 static ssize_t vmid_val_store(struct device *dev,
2113 			      struct device_attribute *attr,
2114 			      const char *buf, size_t size)
2115 {
2116 	unsigned long val;
2117 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2118 	struct etmv4_config *config = &drvdata->config;
2119 
2120 	/*
	 * Only implemented when vmid tracing is supported, i.e. at least one
	 * vmid comparator is implemented and the VMID is at least 8 bits wide.
2123 	 */
2124 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2125 		return -EINVAL;
2126 	if (kstrtoul(buf, 16, &val))
2127 		return -EINVAL;
2128 
2129 	spin_lock(&drvdata->spinlock);
2130 	config->vmid_val[config->vmid_idx] = (u64)val;
2131 	spin_unlock(&drvdata->spinlock);
2132 	return size;
2133 }
2134 static DEVICE_ATTR_RW(vmid_val);
2135 
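/*
 * vmid_masks mirrors ctxid_masks: one mask byte per implemented VMID
 * comparator, split across vmid_mask0 (comparators 0-3) and vmid_mask1
 * (comparators 4-7).
 */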
2136 static ssize_t vmid_masks_show(struct device *dev,
2137 			       struct device_attribute *attr, char *buf)
2138 {
2139 	unsigned long val1, val2;
2140 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2141 	struct etmv4_config *config = &drvdata->config;
2142 
2143 	spin_lock(&drvdata->spinlock);
2144 	val1 = config->vmid_mask0;
2145 	val2 = config->vmid_mask1;
2146 	spin_unlock(&drvdata->spinlock);
2147 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2148 }
2149 
2150 static ssize_t vmid_masks_store(struct device *dev,
2151 				struct device_attribute *attr,
2152 				const char *buf, size_t size)
2153 {
2154 	u8 i, j, maskbyte;
2155 	unsigned long val1, val2, mask;
2156 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2157 	struct etmv4_config *config = &drvdata->config;
2158 	int nr_inputs;
2159 
2160 	/*
	 * Only implemented when vmid tracing is supported, i.e. at least one
	 * vmid comparator is implemented and the VMID is at least 8 bits wide.
2163 	 */
2164 	if (!drvdata->vmid_size || !drvdata->numvmidc)
2165 		return -EINVAL;
2166 	/* one mask if <= 4 comparators, two for up to 8 */
2167 	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2168 	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2169 		return -EINVAL;
2170 
2171 	spin_lock(&drvdata->spinlock);
2172 
2173 	/*
	 * Each byte [0..3] of vmid_mask0 controls the mask applied to vmid
	 * comparator [0..3]; bytes of vmid_mask1 cover comparators [4..7].
2176 	 */
2177 	switch (drvdata->numvmidc) {
2178 	case 0x1:
2179 		/* COMP0, bits[7:0] */
2180 		config->vmid_mask0 = val1 & 0xFF;
2181 		break;
2182 	case 0x2:
2183 		/* COMP1, bits[15:8] */
2184 		config->vmid_mask0 = val1 & 0xFFFF;
2185 		break;
2186 	case 0x3:
2187 		/* COMP2, bits[23:16] */
2188 		config->vmid_mask0 = val1 & 0xFFFFFF;
2189 		break;
2190 	case 0x4:
2191 		/* COMP3, bits[31:24] */
2192 		config->vmid_mask0 = val1;
2193 		break;
2194 	case 0x5:
2195 		/* COMP4, bits[7:0] */
2196 		config->vmid_mask0 = val1;
2197 		config->vmid_mask1 = val2 & 0xFF;
2198 		break;
2199 	case 0x6:
2200 		/* COMP5, bits[15:8] */
2201 		config->vmid_mask0 = val1;
2202 		config->vmid_mask1 = val2 & 0xFFFF;
2203 		break;
2204 	case 0x7:
2205 		/* COMP6, bits[23:16] */
2206 		config->vmid_mask0 = val1;
2207 		config->vmid_mask1 = val2 & 0xFFFFFF;
2208 		break;
2209 	case 0x8:
2210 		/* COMP7, bits[31:24] */
2211 		config->vmid_mask0 = val1;
2212 		config->vmid_mask1 = val2;
2213 		break;
2214 	default:
2215 		break;
2216 	}
2217 
	/*
	 * If software sets a mask bit to 1, it must program the relevant
	 * byte of the vmid comparator value to 0x0, otherwise the behavior
	 * is unpredictable.  For example, if bit[3] of vmid_mask0 is 1, we
	 * must clear bits[31:24] of the vmid comparator0 value register
	 * (i.e. byte 3 of that value).
	 */
2224 	mask = config->vmid_mask0;
2225 	for (i = 0; i < drvdata->numvmidc; i++) {
2226 		/* mask value of corresponding vmid comparator */
2227 		maskbyte = mask & ETMv4_EVENT_MASK;
2228 		/*
2229 		 * each bit corresponds to a byte of respective vmid comparator
2230 		 * value register
2231 		 */
2232 		for (j = 0; j < 8; j++) {
2233 			if (maskbyte & 1)
2234 				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2235 			maskbyte >>= 1;
2236 		}
2237 		/* Select the next vmid comparator mask value */
2238 		if (i == 3)
2239 			/* vmid comparators[4-7] */
2240 			mask = config->vmid_mask1;
2241 		else
2242 			mask >>= 0x8;
2243 	}
2244 	spin_unlock(&drvdata->spinlock);
2245 	return size;
2246 }
2247 static DEVICE_ATTR_RW(vmid_masks);
2248 
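/* cpu reports which CPU this trace unit is associated with. */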
2249 static ssize_t cpu_show(struct device *dev,
2250 			struct device_attribute *attr, char *buf)
2251 {
2252 	int val;
2253 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2254 
2255 	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
2259 static DEVICE_ATTR_RO(cpu);
2260 
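/* Configuration attributes exposed in the device's sysfs directory. */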
2261 static struct attribute *coresight_etmv4_attrs[] = {
2262 	&dev_attr_nr_pe_cmp.attr,
2263 	&dev_attr_nr_addr_cmp.attr,
2264 	&dev_attr_nr_cntr.attr,
2265 	&dev_attr_nr_ext_inp.attr,
2266 	&dev_attr_numcidc.attr,
2267 	&dev_attr_numvmidc.attr,
2268 	&dev_attr_nrseqstate.attr,
2269 	&dev_attr_nr_resource.attr,
2270 	&dev_attr_nr_ss_cmp.attr,
2271 	&dev_attr_reset.attr,
2272 	&dev_attr_mode.attr,
2273 	&dev_attr_pe.attr,
2274 	&dev_attr_event.attr,
2275 	&dev_attr_event_instren.attr,
2276 	&dev_attr_event_ts.attr,
2277 	&dev_attr_syncfreq.attr,
2278 	&dev_attr_cyc_threshold.attr,
2279 	&dev_attr_bb_ctrl.attr,
2280 	&dev_attr_event_vinst.attr,
2281 	&dev_attr_s_exlevel_vinst.attr,
2282 	&dev_attr_ns_exlevel_vinst.attr,
2283 	&dev_attr_addr_idx.attr,
2284 	&dev_attr_addr_instdatatype.attr,
2285 	&dev_attr_addr_single.attr,
2286 	&dev_attr_addr_range.attr,
2287 	&dev_attr_addr_start.attr,
2288 	&dev_attr_addr_stop.attr,
2289 	&dev_attr_addr_ctxtype.attr,
2290 	&dev_attr_addr_context.attr,
2291 	&dev_attr_addr_exlevel_s_ns.attr,
2292 	&dev_attr_addr_cmp_view.attr,
2293 	&dev_attr_vinst_pe_cmp_start_stop.attr,
2294 	&dev_attr_sshot_idx.attr,
2295 	&dev_attr_sshot_ctrl.attr,
2296 	&dev_attr_sshot_pe_ctrl.attr,
2297 	&dev_attr_sshot_status.attr,
2298 	&dev_attr_seq_idx.attr,
2299 	&dev_attr_seq_state.attr,
2300 	&dev_attr_seq_event.attr,
2301 	&dev_attr_seq_reset_event.attr,
2302 	&dev_attr_cntr_idx.attr,
2303 	&dev_attr_cntrldvr.attr,
2304 	&dev_attr_cntr_val.attr,
2305 	&dev_attr_cntr_ctrl.attr,
2306 	&dev_attr_res_idx.attr,
2307 	&dev_attr_res_ctrl.attr,
2308 	&dev_attr_ctxid_idx.attr,
2309 	&dev_attr_ctxid_pid.attr,
2310 	&dev_attr_ctxid_masks.attr,
2311 	&dev_attr_vmid_idx.attr,
2312 	&dev_attr_vmid_val.attr,
2313 	&dev_attr_vmid_masks.attr,
2314 	&dev_attr_cpu.attr,
2315 	NULL,
2316 };
2317 
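/* Parameter block for do_smp_cross_read(): register address in, value out. */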
2318 struct etmv4_reg {
2319 	void __iomem *addr;
2320 	u32 data;
2321 };
2322 
2323 static void do_smp_cross_read(void *data)
2324 {
2325 	struct etmv4_reg *reg = data;
2326 
2327 	reg->data = readl_relaxed(reg->addr);
2328 }
2329 
2330 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2331 {
2332 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2333 	struct etmv4_reg reg;
2334 
2335 	reg.addr = drvdata->base + offset;
2336 	/*
2337 	 * smp cross call ensures the CPU will be powered up before
2338 	 * accessing the ETMv4 trace core registers
2339 	 */
2340 	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2341 	return reg.data;
2342 }
2343 
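/*
 * coresight_etm4x_reg() creates a read-only attribute that reads the
 * register directly, while coresight_etm4x_cross_read() routes the read
 * through etmv4_cross_read() so it executes on the CPU owning the tracer.
 */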
2344 #define coresight_etm4x_reg(name, offset)			\
2345 	coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2346 
2347 #define coresight_etm4x_cross_read(name, offset)			\
2348 	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
2349 			      name, offset)
2350 
2351 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2352 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2353 coresight_etm4x_reg(trclsr, TRCLSR);
2354 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2355 coresight_etm4x_reg(trcdevid, TRCDEVID);
2356 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2357 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2358 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2359 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2360 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2361 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2362 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2363 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2364 
2365 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2366 	&dev_attr_trcoslsr.attr,
2367 	&dev_attr_trcpdcr.attr,
2368 	&dev_attr_trcpdsr.attr,
2369 	&dev_attr_trclsr.attr,
2370 	&dev_attr_trcconfig.attr,
2371 	&dev_attr_trctraceid.attr,
2372 	&dev_attr_trcauthstatus.attr,
2373 	&dev_attr_trcdevid.attr,
2374 	&dev_attr_trcdevtype.attr,
2375 	&dev_attr_trcpidr0.attr,
2376 	&dev_attr_trcpidr1.attr,
2377 	&dev_attr_trcpidr2.attr,
2378 	&dev_attr_trcpidr3.attr,
2379 	NULL,
2380 };
2381 
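/* The TRCIDR ID registers are likewise read on the CPU that owns the tracer. */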
2382 coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2383 coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2384 coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2385 coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2386 coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2387 coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2388 /* trcidr[6,7] are reserved */
2389 coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2390 coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2391 coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2392 coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2393 coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2394 coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2395 
2396 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2397 	&dev_attr_trcidr0.attr,
2398 	&dev_attr_trcidr1.attr,
2399 	&dev_attr_trcidr2.attr,
2400 	&dev_attr_trcidr3.attr,
2401 	&dev_attr_trcidr4.attr,
2402 	&dev_attr_trcidr5.attr,
2403 	/* trcidr[6,7] are reserved */
2404 	&dev_attr_trcidr8.attr,
2405 	&dev_attr_trcidr9.attr,
2406 	&dev_attr_trcidr10.attr,
2407 	&dev_attr_trcidr11.attr,
2408 	&dev_attr_trcidr12.attr,
2409 	&dev_attr_trcidr13.attr,
2410 	NULL,
2411 };
2412 
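/*
 * The groups below appear in sysfs as the device directory itself plus
 * the "mgmt" and "trcidr" subdirectories.
 */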
2413 static const struct attribute_group coresight_etmv4_group = {
2414 	.attrs = coresight_etmv4_attrs,
2415 };
2416 
2417 static const struct attribute_group coresight_etmv4_mgmt_group = {
2418 	.attrs = coresight_etmv4_mgmt_attrs,
2419 	.name = "mgmt",
2420 };
2421 
2422 static const struct attribute_group coresight_etmv4_trcidr_group = {
2423 	.attrs = coresight_etmv4_trcidr_attrs,
2424 	.name = "trcidr",
2425 };
2426 
2427 const struct attribute_group *coresight_etmv4_groups[] = {
2428 	&coresight_etmv4_group,
2429 	&coresight_etmv4_mgmt_group,
2430 	&coresight_etmv4_trcidr_group,
2431 	NULL,
2432 };
2433