1 /*
2  * Copyright(C) 2015 Linaro Limited. All rights reserved.
3  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include <linux/pm_runtime.h>
19 #include <linux/sysfs.h>
20 #include "coresight-etm4x.h"
21 #include "coresight-priv.h"
22 
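/*
 * Program the ViewInst include/exclude control (viiectlr) for the
 * instruction address comparator pair selected by addr_idx.  As a worked
 * example, for addr_idx = 0 (comparators 0 and 1 forming pair 0) excluding
 * sets bit 16 and clears bit 0, while including sets bit 0 and clears
 * bit 16, i.e. one bit per pair in each half of the register.
 */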
23 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
24 {
25 	u8 idx;
26 	struct etmv4_config *config = &drvdata->config;
27 
28 	idx = config->addr_idx;
29 
30 	/*
31 	 * TRCACATRn.TYPE bits[1:0]: type of comparison
32 	 * the trace unit performs
33 	 */
34 	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
35 		if (idx % 2 != 0)
36 			return -EINVAL;
37 
38 		/*
39 		 * We are performing instruction address comparison. Set the
40 		 * relevant bit of ViewInst Include/Exclude Control register
41 		 * for corresponding address comparator pair.
42 		 */
43 		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
44 		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
45 			return -EINVAL;
46 
47 		if (exclude) {
48 			/*
49 			 * Set exclude bit and unset the include bit
50 			 * corresponding to comparator pair
51 			 */
52 			config->viiectlr |= BIT(idx / 2 + 16);
53 			config->viiectlr &= ~BIT(idx / 2);
54 		} else {
55 			/*
56 			 * Set include bit and unset exclude bit
57 			 * corresponding to comparator pair
58 			 */
59 			config->viiectlr |= BIT(idx / 2);
60 			config->viiectlr &= ~BIT(idx / 2 + 16);
61 		}
62 	}
63 	return 0;
64 }
65 
66 static ssize_t nr_pe_cmp_show(struct device *dev,
67 			      struct device_attribute *attr,
68 			      char *buf)
69 {
70 	unsigned long val;
71 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
72 
73 	val = drvdata->nr_pe_cmp;
74 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
75 }
76 static DEVICE_ATTR_RO(nr_pe_cmp);
77 
78 static ssize_t nr_addr_cmp_show(struct device *dev,
79 				struct device_attribute *attr,
80 				char *buf)
81 {
82 	unsigned long val;
83 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
84 
85 	val = drvdata->nr_addr_cmp;
86 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
87 }
88 static DEVICE_ATTR_RO(nr_addr_cmp);
89 
90 static ssize_t nr_cntr_show(struct device *dev,
91 			    struct device_attribute *attr,
92 			    char *buf)
93 {
94 	unsigned long val;
95 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
96 
97 	val = drvdata->nr_cntr;
98 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
99 }
100 static DEVICE_ATTR_RO(nr_cntr);
101 
102 static ssize_t nr_ext_inp_show(struct device *dev,
103 			       struct device_attribute *attr,
104 			       char *buf)
105 {
106 	unsigned long val;
107 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
108 
109 	val = drvdata->nr_ext_inp;
110 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
111 }
112 static DEVICE_ATTR_RO(nr_ext_inp);
113 
114 static ssize_t numcidc_show(struct device *dev,
115 			    struct device_attribute *attr,
116 			    char *buf)
117 {
118 	unsigned long val;
119 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
120 
121 	val = drvdata->numcidc;
122 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
123 }
124 static DEVICE_ATTR_RO(numcidc);
125 
126 static ssize_t numvmidc_show(struct device *dev,
127 			     struct device_attribute *attr,
128 			     char *buf)
129 {
130 	unsigned long val;
131 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
132 
133 	val = drvdata->numvmidc;
134 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
135 }
136 static DEVICE_ATTR_RO(numvmidc);
137 
138 static ssize_t nrseqstate_show(struct device *dev,
139 			       struct device_attribute *attr,
140 			       char *buf)
141 {
142 	unsigned long val;
143 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
144 
145 	val = drvdata->nrseqstate;
146 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
147 }
148 static DEVICE_ATTR_RO(nrseqstate);
149 
150 static ssize_t nr_resource_show(struct device *dev,
151 				struct device_attribute *attr,
152 				char *buf)
153 {
154 	unsigned long val;
155 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
156 
157 	val = drvdata->nr_resource;
158 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
159 }
160 static DEVICE_ATTR_RO(nr_resource);
161 
162 static ssize_t nr_ss_cmp_show(struct device *dev,
163 			      struct device_attribute *attr,
164 			      char *buf)
165 {
166 	unsigned long val;
167 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
168 
169 	val = drvdata->nr_ss_cmp;
170 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
171 }
172 static DEVICE_ATTR_RO(nr_ss_cmp);
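
/*
 * The read-only attributes above simply report the resources advertised by
 * the trace unit (number of address/PE/single-shot comparators, counters,
 * external inputs, ctxid/vmid comparators, resource selectors and sequencer
 * states).  Assuming the usual CoreSight sysfs location, they can be
 * inspected with, e.g.:
 *
 *	cat /sys/bus/coresight/devices/<etm-name>/nr_addr_cmp
 */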
173 
174 static ssize_t reset_store(struct device *dev,
175 			   struct device_attribute *attr,
176 			   const char *buf, size_t size)
177 {
178 	int i;
179 	unsigned long val;
180 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
181 	struct etmv4_config *config = &drvdata->config;
182 
183 	if (kstrtoul(buf, 16, &val))
184 		return -EINVAL;
185 
186 	spin_lock(&drvdata->spinlock);
187 	if (val)
188 		config->mode = 0x0;
189 
190 	/* Disable data tracing: do not trace load and store data transfers */
191 	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
192 	config->cfg &= ~(BIT(1) | BIT(2));
193 
194 	/* Disable data value and data address tracing */
195 	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
196 			   ETM_MODE_DATA_TRACE_VAL);
197 	config->cfg &= ~(BIT(16) | BIT(17));
198 
199 	/* Disable all events tracing */
200 	config->eventctrl0 = 0x0;
201 	config->eventctrl1 = 0x0;
202 
203 	/* Disable timestamp event */
204 	config->ts_ctrl = 0x0;
205 
206 	/* Disable stalling */
207 	config->stall_ctrl = 0x0;
208 
209 	/* Reset trace synchronization period to 2^8 = 256 bytes */
210 	if (!drvdata->syncpr)
211 		config->syncfreq = 0x8;
212 
213 	/*
214 	 * Enable ViewInst to trace everything with start-stop logic in
215 	 * started state. ARM recommends start-stop logic is set before
216 	 * each trace run.
217 	 */
218 	config->vinst_ctrl |= BIT(0);
219 	if (drvdata->nr_addr_cmp) {
220 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
221 		/* SSSTATUS, bit[9] */
222 		config->vinst_ctrl |= BIT(9);
223 	}
224 
225 	/* No address range filtering for ViewInst */
226 	config->viiectlr = 0x0;
227 
228 	/* No start-stop filtering for ViewInst */
229 	config->vissctlr = 0x0;
230 
231 	/* Disable seq events */
232 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
233 		config->seq_ctrl[i] = 0x0;
234 	config->seq_rst = 0x0;
235 	config->seq_state = 0x0;
236 
237 	/* Disable external input events */
238 	config->ext_inp = 0x0;
239 
240 	config->cntr_idx = 0x0;
241 	for (i = 0; i < drvdata->nr_cntr; i++) {
242 		config->cntrldvr[i] = 0x0;
243 		config->cntr_ctrl[i] = 0x0;
244 		config->cntr_val[i] = 0x0;
245 	}
246 
247 	config->res_idx = 0x0;
248 	for (i = 0; i < drvdata->nr_resource; i++)
249 		config->res_ctrl[i] = 0x0;
250 
251 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
252 		config->ss_ctrl[i] = 0x0;
253 		config->ss_pe_cmp[i] = 0x0;
254 	}
255 
256 	config->addr_idx = 0x0;
257 	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
258 		config->addr_val[i] = 0x0;
259 		config->addr_acc[i] = 0x0;
260 		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
261 	}
262 
263 	config->ctxid_idx = 0x0;
264 	for (i = 0; i < drvdata->numcidc; i++) {
265 		config->ctxid_pid[i] = 0x0;
266 		config->ctxid_vpid[i] = 0x0;
267 	}
268 
269 	config->ctxid_mask0 = 0x0;
270 	config->ctxid_mask1 = 0x0;
271 
272 	config->vmid_idx = 0x0;
273 	for (i = 0; i < drvdata->numvmidc; i++)
274 		config->vmid_val[i] = 0x0;
275 	config->vmid_mask0 = 0x0;
276 	config->vmid_mask1 = 0x0;
277 
278 	drvdata->trcid = drvdata->cpu + 1;
279 
280 	spin_unlock(&drvdata->spinlock);
281 
282 	return size;
283 }
284 static DEVICE_ATTR_WO(reset);
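
/*
 * Example usage, assuming the usual CoreSight sysfs location (value is
 * illustrative): writing a non-zero value resets the whole stored
 * configuration, including 'mode', back to its defaults:
 *
 *	echo 1 > /sys/bus/coresight/devices/<etm-name>/reset
 */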
285 
286 static ssize_t mode_show(struct device *dev,
287 			 struct device_attribute *attr,
288 			 char *buf)
289 {
290 	unsigned long val;
291 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
292 	struct etmv4_config *config = &drvdata->config;
293 
294 	val = config->mode;
295 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
296 }
297 
298 static ssize_t mode_store(struct device *dev,
299 			  struct device_attribute *attr,
300 			  const char *buf, size_t size)
301 {
302 	unsigned long val, mode;
303 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
304 	struct etmv4_config *config = &drvdata->config;
305 
306 	if (kstrtoul(buf, 16, &val))
307 		return -EINVAL;
308 
309 	spin_lock(&drvdata->spinlock);
310 	config->mode = val & ETMv4_MODE_ALL;
311 
312 	if (config->mode & ETM_MODE_EXCLUDE)
313 		etm4_set_mode_exclude(drvdata, true);
314 	else
315 		etm4_set_mode_exclude(drvdata, false);
316 
317 	if (drvdata->instrp0) {
318 		/* start by clearing instruction P0 field */
319 		config->cfg  &= ~(BIT(1) | BIT(2));
320 		if (config->mode & ETM_MODE_LOAD)
321 			/* 0b01 Trace load instructions as P0 instructions */
322 			config->cfg  |= BIT(1);
323 		if (config->mode & ETM_MODE_STORE)
324 			/* 0b10 Trace store instructions as P0 instructions */
325 			config->cfg  |= BIT(2);
326 		if (config->mode & ETM_MODE_LOAD_STORE)
327 			/*
328 			 * 0b11 Trace load and store instructions
329 			 * as P0 instructions
330 			 */
331 			config->cfg  |= BIT(1) | BIT(2);
332 	}
333 
334 	/* bit[3], Branch broadcast mode */
335 	if ((config->mode & ETM_MODE_BB) && drvdata->trcbb)
336 		config->cfg |= BIT(3);
337 	else
338 		config->cfg &= ~BIT(3);
339 
340 	/* bit[4], Cycle counting instruction trace bit */
341 	if ((config->mode & ETMv4_MODE_CYCACC) &&
342 	    drvdata->trccci)
343 		config->cfg |= BIT(4);
344 	else
345 		config->cfg &= ~BIT(4);
346 
347 	/* bit[6], Context ID tracing bit */
348 	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
349 		config->cfg |= BIT(6);
350 	else
351 		config->cfg &= ~BIT(6);
352 
353 	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
354 		config->cfg |= BIT(7);
355 	else
356 		config->cfg &= ~BIT(7);
357 
358 	/* bits[10:8], Conditional instruction tracing bit */
359 	mode = ETM_MODE_COND(config->mode);
360 	if (drvdata->trccond) {
361 		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
362 		config->cfg |= mode << 8;
363 	}
364 
365 	/* bit[11], Global timestamp tracing bit */
366 	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
367 		config->cfg |= BIT(11);
368 	else
369 		config->cfg &= ~BIT(11);
370 
371 	/* bit[12], Return stack enable bit */
372 	if ((config->mode & ETM_MODE_RETURNSTACK) &&
373 	    drvdata->retstack)
374 		config->cfg |= BIT(12);
375 	else
376 		config->cfg &= ~BIT(12);
377 
378 	/* bits[14:13], Q element enable field */
379 	mode = ETM_MODE_QELEM(config->mode);
380 	/* start by clearing QE bits */
381 	config->cfg &= ~(BIT(13) | BIT(14));
382 	/* if supported, Q elements with instruction counts are enabled */
383 	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
384 		config->cfg |= BIT(13);
385 	/*
386 	 * if supported, Q elements with and without instruction
387 	 * counts are enabled
388 	 */
389 	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
390 		config->cfg |= BIT(14);
391 
392 	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
393 	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
394 	    drvdata->atbtrig)
395 		config->eventctrl1 |= BIT(11);
396 	else
397 		config->eventctrl1 &= ~BIT(11);
398 
399 	/* bit[12], Low-power state behavior override bit */
400 	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
401 	    drvdata->lpoverride)
402 		config->eventctrl1 |= BIT(12);
403 	else
404 		config->eventctrl1 &= ~BIT(12);
405 
406 	/* bit[8], Instruction stall bit */
407 	if (config->mode & ETM_MODE_ISTALL_EN)
408 		config->stall_ctrl |= BIT(8);
409 	else
410 		config->stall_ctrl &= ~BIT(8);
411 
412 	/* bit[10], Prioritize instruction trace bit */
413 	if (config->mode & ETM_MODE_INSTPRIO)
414 		config->stall_ctrl |= BIT(10);
415 	else
416 		config->stall_ctrl &= ~BIT(10);
417 
418 	/* bit[13], Trace overflow prevention bit */
419 	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
420 	    drvdata->nooverflow)
421 		config->stall_ctrl |= BIT(13);
422 	else
423 		config->stall_ctrl &= ~BIT(13);
424 
425 	/* bit[9] Start/stop logic control bit */
426 	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
427 		config->vinst_ctrl |= BIT(9);
428 	else
429 		config->vinst_ctrl &= ~BIT(9);
430 
431 	/* bit[10], Whether a trace unit must trace a Reset exception */
432 	if (config->mode & ETM_MODE_TRACE_RESET)
433 		config->vinst_ctrl |= BIT(10);
434 	else
435 		config->vinst_ctrl &= ~BIT(10);
436 
437 	/* bit[11], Whether a trace unit must trace a system error exception */
438 	if ((config->mode & ETM_MODE_TRACE_ERR) &&
439 	    drvdata->trc_error)
440 		config->vinst_ctrl |= BIT(11);
441 	else
442 		config->vinst_ctrl &= ~BIT(11);
443 
444 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
445 		etm4_config_trace_mode(config);
446 
447 	spin_unlock(&drvdata->spinlock);
448 
449 	return size;
450 }
451 static DEVICE_ATTR_RW(mode);
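
/*
 * 'mode' takes a bitmask (parsed as hex) built from the ETM_MODE_* and
 * ETMv4_MODE_* flags in coresight-etm4x.h and translates it into the cfg,
 * eventctrl1, stall_ctrl and vinst_ctrl fields programmed above.  Purely as
 * an illustration (the actual flag values live in the header):
 *
 *	echo 0x10 > /sys/bus/coresight/devices/<etm-name>/mode
 */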
452 
453 static ssize_t pe_show(struct device *dev,
454 		       struct device_attribute *attr,
455 		       char *buf)
456 {
457 	unsigned long val;
458 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
459 	struct etmv4_config *config = &drvdata->config;
460 
461 	val = config->pe_sel;
462 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
463 }
464 
465 static ssize_t pe_store(struct device *dev,
466 			struct device_attribute *attr,
467 			const char *buf, size_t size)
468 {
469 	unsigned long val;
470 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
471 	struct etmv4_config *config = &drvdata->config;
472 
473 	if (kstrtoul(buf, 16, &val))
474 		return -EINVAL;
475 
476 	spin_lock(&drvdata->spinlock);
477 	if (val > drvdata->nr_pe) {
478 		spin_unlock(&drvdata->spinlock);
479 		return -EINVAL;
480 	}
481 
482 	config->pe_sel = val;
483 	spin_unlock(&drvdata->spinlock);
484 	return size;
485 }
486 static DEVICE_ATTR_RW(pe);
487 
488 static ssize_t event_show(struct device *dev,
489 			  struct device_attribute *attr,
490 			  char *buf)
491 {
492 	unsigned long val;
493 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
494 	struct etmv4_config *config = &drvdata->config;
495 
496 	val = config->eventctrl0;
497 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
498 }
499 
500 static ssize_t event_store(struct device *dev,
501 			   struct device_attribute *attr,
502 			   const char *buf, size_t size)
503 {
504 	unsigned long val;
505 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
506 	struct etmv4_config *config = &drvdata->config;
507 
508 	if (kstrtoul(buf, 16, &val))
509 		return -EINVAL;
510 
511 	spin_lock(&drvdata->spinlock);
512 	switch (drvdata->nr_event) {
513 	case 0x0:
514 		/* EVENT0, bits[7:0] */
515 		config->eventctrl0 = val & 0xFF;
516 		break;
517 	case 0x1:
518 		 /* EVENT1, bits[15:8] */
519 		config->eventctrl0 = val & 0xFFFF;
520 		break;
521 	case 0x2:
522 		/* EVENT2, bits[23:16] */
523 		config->eventctrl0 = val & 0xFFFFFF;
524 		break;
525 	case 0x3:
526 		/* EVENT3, bits[31:24] */
527 		config->eventctrl0 = val;
528 		break;
529 	default:
530 		break;
531 	}
532 	spin_unlock(&drvdata->spinlock);
533 	return size;
534 }
535 static DEVICE_ATTR_RW(event);
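
/*
 * 'event' programs eventctrl0 with one event selector per byte; how many of
 * the four bytes are usable depends on nr_event.  For instance, from the
 * device's sysfs directory on an implementation with at least two events
 * (selector values are illustrative):
 *
 *	echo 0x101 > event	# EVENT0 = 0x01, EVENT1 = 0x01
 */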
536 
537 static ssize_t event_instren_show(struct device *dev,
538 				  struct device_attribute *attr,
539 				  char *buf)
540 {
541 	unsigned long val;
542 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
543 	struct etmv4_config *config = &drvdata->config;
544 
545 	val = BMVAL(config->eventctrl1, 0, 3);
546 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
547 }
548 
549 static ssize_t event_instren_store(struct device *dev,
550 				   struct device_attribute *attr,
551 				   const char *buf, size_t size)
552 {
553 	unsigned long val;
554 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
555 	struct etmv4_config *config = &drvdata->config;
556 
557 	if (kstrtoul(buf, 16, &val))
558 		return -EINVAL;
559 
560 	spin_lock(&drvdata->spinlock);
561 	/* start by clearing all instruction event enable bits */
562 	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
563 	switch (drvdata->nr_event) {
564 	case 0x0:
565 		/* generate Event element for event 1 */
566 		config->eventctrl1 |= val & BIT(1);
567 		break;
568 	case 0x1:
569 		/* generate Event element for event 1 and 2 */
570 		config->eventctrl1 |= val & (BIT(0) | BIT(1));
571 		break;
572 	case 0x2:
573 		/* generate Event element for event 1, 2 and 3 */
574 		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
575 		break;
576 	case 0x3:
577 		/* generate Event element for all 4 events */
578 		config->eventctrl1 |= val & 0xF;
579 		break;
580 	default:
581 		break;
582 	}
583 	spin_unlock(&drvdata->spinlock);
584 	return size;
585 }
586 static DEVICE_ATTR_RW(event_instren);
587 
588 static ssize_t event_ts_show(struct device *dev,
589 			     struct device_attribute *attr,
590 			     char *buf)
591 {
592 	unsigned long val;
593 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
594 	struct etmv4_config *config = &drvdata->config;
595 
596 	val = config->ts_ctrl;
597 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
598 }
599 
600 static ssize_t event_ts_store(struct device *dev,
601 			      struct device_attribute *attr,
602 			      const char *buf, size_t size)
603 {
604 	unsigned long val;
605 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
606 	struct etmv4_config *config = &drvdata->config;
607 
608 	if (kstrtoul(buf, 16, &val))
609 		return -EINVAL;
610 	if (!drvdata->ts_size)
611 		return -EINVAL;
612 
613 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
614 	return size;
615 }
616 static DEVICE_ATTR_RW(event_ts);
617 
618 static ssize_t syncfreq_show(struct device *dev,
619 			     struct device_attribute *attr,
620 			     char *buf)
621 {
622 	unsigned long val;
623 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
624 	struct etmv4_config *config = &drvdata->config;
625 
626 	val = config->syncfreq;
627 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
628 }
629 
630 static ssize_t syncfreq_store(struct device *dev,
631 			      struct device_attribute *attr,
632 			      const char *buf, size_t size)
633 {
634 	unsigned long val;
635 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
636 	struct etmv4_config *config = &drvdata->config;
637 
638 	if (kstrtoul(buf, 16, &val))
639 		return -EINVAL;
640 	if (drvdata->syncpr)
641 		return -EINVAL;
642 
643 	config->syncfreq = val & ETMv4_SYNC_MASK;
644 	return size;
645 }
646 static DEVICE_ATTR_RW(syncfreq);
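
/*
 * The synchronization period is expressed as a power of two bytes, so the
 * reset value of 0x8 used above corresponds to 2^8 = 256 bytes.  From the
 * device's sysfs directory (illustration only):
 *
 *	echo 0x8 > syncfreq
 *
 * Writes are rejected with -EINVAL when the period is fixed in hardware
 * (syncpr set).
 */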
647 
648 static ssize_t cyc_threshold_show(struct device *dev,
649 				  struct device_attribute *attr,
650 				  char *buf)
651 {
652 	unsigned long val;
653 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
654 	struct etmv4_config *config = &drvdata->config;
655 
656 	val = config->ccctlr;
657 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
658 }
659 
660 static ssize_t cyc_threshold_store(struct device *dev,
661 				   struct device_attribute *attr,
662 				   const char *buf, size_t size)
663 {
664 	unsigned long val;
665 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
666 	struct etmv4_config *config = &drvdata->config;
667 
668 	if (kstrtoul(buf, 16, &val))
669 		return -EINVAL;
670 	if (val < drvdata->ccitmin)
671 		return -EINVAL;
672 
673 	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
674 	return size;
675 }
676 static DEVICE_ATTR_RW(cyc_threshold);
677 
678 static ssize_t bb_ctrl_show(struct device *dev,
679 			    struct device_attribute *attr,
680 			    char *buf)
681 {
682 	unsigned long val;
683 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
684 	struct etmv4_config *config = &drvdata->config;
685 
686 	val = config->bb_ctrl;
687 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
688 }
689 
690 static ssize_t bb_ctrl_store(struct device *dev,
691 			     struct device_attribute *attr,
692 			     const char *buf, size_t size)
693 {
694 	unsigned long val;
695 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
696 	struct etmv4_config *config = &drvdata->config;
697 
698 	if (kstrtoul(buf, 16, &val))
699 		return -EINVAL;
700 	if (!drvdata->trcbb)
701 		return -EINVAL;
702 	if (!drvdata->nr_addr_cmp)
703 		return -EINVAL;
704 	/*
705 	 * Bit[7:0] selects which address range comparator is used for
706 	 * branch broadcast control.
707 	 */
708 	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
709 		return -EINVAL;
710 
711 	config->bb_ctrl = val;
712 	return size;
713 }
714 static DEVICE_ATTR_RW(bb_ctrl);
715 
716 static ssize_t event_vinst_show(struct device *dev,
717 				struct device_attribute *attr,
718 				char *buf)
719 {
720 	unsigned long val;
721 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
722 	struct etmv4_config *config = &drvdata->config;
723 
724 	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
725 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
726 }
727 
728 static ssize_t event_vinst_store(struct device *dev,
729 				 struct device_attribute *attr,
730 				 const char *buf, size_t size)
731 {
732 	unsigned long val;
733 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
734 	struct etmv4_config *config = &drvdata->config;
735 
736 	if (kstrtoul(buf, 16, &val))
737 		return -EINVAL;
738 
739 	spin_lock(&drvdata->spinlock);
740 	val &= ETMv4_EVENT_MASK;
741 	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
742 	config->vinst_ctrl |= val;
743 	spin_unlock(&drvdata->spinlock);
744 	return size;
745 }
746 static DEVICE_ATTR_RW(event_vinst);
747 
748 static ssize_t s_exlevel_vinst_show(struct device *dev,
749 				    struct device_attribute *attr,
750 				    char *buf)
751 {
752 	unsigned long val;
753 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
754 	struct etmv4_config *config = &drvdata->config;
755 
756 	val = BMVAL(config->vinst_ctrl, 16, 19);
757 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
758 }
759 
760 static ssize_t s_exlevel_vinst_store(struct device *dev,
761 				     struct device_attribute *attr,
762 				     const char *buf, size_t size)
763 {
764 	unsigned long val;
765 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
766 	struct etmv4_config *config = &drvdata->config;
767 
768 	if (kstrtoul(buf, 16, &val))
769 		return -EINVAL;
770 
771 	spin_lock(&drvdata->spinlock);
772 	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
773 	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
774 	/* enable instruction tracing for corresponding exception level */
775 	val &= drvdata->s_ex_level;
776 	config->vinst_ctrl |= (val << 16);
777 	spin_unlock(&drvdata->spinlock);
778 	return size;
779 }
780 static DEVICE_ATTR_RW(s_exlevel_vinst);
781 
782 static ssize_t ns_exlevel_vinst_show(struct device *dev,
783 				     struct device_attribute *attr,
784 				     char *buf)
785 {
786 	unsigned long val;
787 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
788 	struct etmv4_config *config = &drvdata->config;
789 
790 	/* EXLEVEL_NS, bits[23:20] */
791 	val = BMVAL(config->vinst_ctrl, 20, 23);
792 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
793 }
794 
795 static ssize_t ns_exlevel_vinst_store(struct device *dev,
796 				      struct device_attribute *attr,
797 				      const char *buf, size_t size)
798 {
799 	unsigned long val;
800 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
801 	struct etmv4_config *config = &drvdata->config;
802 
803 	if (kstrtoul(buf, 16, &val))
804 		return -EINVAL;
805 
806 	spin_lock(&drvdata->spinlock);
807 	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
808 	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
809 	/* enable instruction tracing for corresponding exception level */
810 	val &= drvdata->ns_ex_level;
811 	config->vinst_ctrl |= (val << 20);
812 	spin_unlock(&drvdata->spinlock);
813 	return size;
814 }
815 static DEVICE_ATTR_RW(ns_exlevel_vinst);
816 
817 static ssize_t addr_idx_show(struct device *dev,
818 			     struct device_attribute *attr,
819 			     char *buf)
820 {
821 	unsigned long val;
822 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
823 	struct etmv4_config *config = &drvdata->config;
824 
825 	val = config->addr_idx;
826 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
827 }
828 
829 static ssize_t addr_idx_store(struct device *dev,
830 			      struct device_attribute *attr,
831 			      const char *buf, size_t size)
832 {
833 	unsigned long val;
834 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
835 	struct etmv4_config *config = &drvdata->config;
836 
837 	if (kstrtoul(buf, 16, &val))
838 		return -EINVAL;
839 	if (val >= drvdata->nr_addr_cmp * 2)
840 		return -EINVAL;
841 
842 	/*
843 	 * Use spinlock to ensure index doesn't change while it gets
844 	 * dereferenced multiple times within a spinlock block elsewhere.
845 	 */
846 	spin_lock(&drvdata->spinlock);
847 	config->addr_idx = val;
848 	spin_unlock(&drvdata->spinlock);
849 	return size;
850 }
851 static DEVICE_ATTR_RW(addr_idx);
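
/*
 * The addr_* attributes below all operate on the comparator currently
 * selected by 'addr_idx', so the usual sequence (from the device's sysfs
 * directory, values illustrative) is select first, then program:
 *
 *	echo 2 > addr_idx
 *	echo 0x408000 > addr_single
 */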
852 
853 static ssize_t addr_instdatatype_show(struct device *dev,
854 				      struct device_attribute *attr,
855 				      char *buf)
856 {
857 	ssize_t len;
858 	u8 val, idx;
859 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
860 	struct etmv4_config *config = &drvdata->config;
861 
862 	spin_lock(&drvdata->spinlock);
863 	idx = config->addr_idx;
864 	val = BMVAL(config->addr_acc[idx], 0, 1);
865 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
866 			val == ETM_INSTR_ADDR ? "instr" :
867 			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
868 			(val == ETM_DATA_STORE_ADDR ? "data_store" :
869 			"data_load_store")));
870 	spin_unlock(&drvdata->spinlock);
871 	return len;
872 }
873 
874 static ssize_t addr_instdatatype_store(struct device *dev,
875 				       struct device_attribute *attr,
876 				       const char *buf, size_t size)
877 {
878 	u8 idx;
879 	char str[20] = "";
880 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
881 	struct etmv4_config *config = &drvdata->config;
882 
883 	if (strlen(buf) >= 20)
884 		return -EINVAL;
885 	if (sscanf(buf, "%s", str) != 1)
886 		return -EINVAL;
887 
888 	spin_lock(&drvdata->spinlock);
889 	idx = config->addr_idx;
890 	if (!strcmp(str, "instr"))
891 		/* TYPE, bits[1:0] */
892 		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
893 
894 	spin_unlock(&drvdata->spinlock);
895 	return size;
896 }
897 static DEVICE_ATTR_RW(addr_instdatatype);
898 
899 static ssize_t addr_single_show(struct device *dev,
900 				struct device_attribute *attr,
901 				char *buf)
902 {
903 	u8 idx;
904 	unsigned long val;
905 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
906 	struct etmv4_config *config = &drvdata->config;
907 
908 	spin_lock(&drvdata->spinlock);
909 	idx = config->addr_idx;
910 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
911 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
912 		spin_unlock(&drvdata->spinlock);
913 		return -EPERM;
914 	}
915 	val = (unsigned long)config->addr_val[idx];
916 	spin_unlock(&drvdata->spinlock);
917 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
918 }
919 
920 static ssize_t addr_single_store(struct device *dev,
921 				 struct device_attribute *attr,
922 				 const char *buf, size_t size)
923 {
924 	u8 idx;
925 	unsigned long val;
926 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
927 	struct etmv4_config *config = &drvdata->config;
928 
929 	if (kstrtoul(buf, 16, &val))
930 		return -EINVAL;
931 
932 	spin_lock(&drvdata->spinlock);
933 	idx = config->addr_idx;
934 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
935 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
936 		spin_unlock(&drvdata->spinlock);
937 		return -EPERM;
938 	}
939 
940 	config->addr_val[idx] = (u64)val;
941 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
942 	spin_unlock(&drvdata->spinlock);
943 	return size;
944 }
945 static DEVICE_ATTR_RW(addr_single);
946 
947 static ssize_t addr_range_show(struct device *dev,
948 			       struct device_attribute *attr,
949 			       char *buf)
950 {
951 	u8 idx;
952 	unsigned long val1, val2;
953 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
954 	struct etmv4_config *config = &drvdata->config;
955 
956 	spin_lock(&drvdata->spinlock);
957 	idx = config->addr_idx;
958 	if (idx % 2 != 0) {
959 		spin_unlock(&drvdata->spinlock);
960 		return -EPERM;
961 	}
962 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
963 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
964 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
965 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
966 		spin_unlock(&drvdata->spinlock);
967 		return -EPERM;
968 	}
969 
970 	val1 = (unsigned long)config->addr_val[idx];
971 	val2 = (unsigned long)config->addr_val[idx + 1];
972 	spin_unlock(&drvdata->spinlock);
973 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
974 }
975 
976 static ssize_t addr_range_store(struct device *dev,
977 				struct device_attribute *attr,
978 				const char *buf, size_t size)
979 {
980 	u8 idx;
981 	unsigned long val1, val2;
982 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
983 	struct etmv4_config *config = &drvdata->config;
984 
985 	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
986 		return -EINVAL;
987 	/* lower address comparator cannot have a higher address value */
988 	if (val1 > val2)
989 		return -EINVAL;
990 
991 	spin_lock(&drvdata->spinlock);
992 	idx = config->addr_idx;
993 	if (idx % 2 != 0) {
994 		spin_unlock(&drvdata->spinlock);
995 		return -EPERM;
996 	}
997 
998 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
999 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1000 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1001 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1002 		spin_unlock(&drvdata->spinlock);
1003 		return -EPERM;
1004 	}
1005 
1006 	config->addr_val[idx] = (u64)val1;
1007 	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1008 	config->addr_val[idx + 1] = (u64)val2;
1009 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1010 	/*
1011 	 * Program include or exclude control bits for vinst or vdata
1012 	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1013 	 */
1014 	if (config->mode & ETM_MODE_EXCLUDE)
1015 		etm4_set_mode_exclude(drvdata, true);
1016 	else
1017 		etm4_set_mode_exclude(drvdata, false);
1018 
1019 	spin_unlock(&drvdata->spinlock);
1020 	return size;
1021 }
1022 static DEVICE_ATTR_RW(addr_range);
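
/*
 * 'addr_range' expects two hex addresses ("start end", start <= end) and
 * needs addr_idx to point at the first comparator of a pair (even index).
 * Illustrative example from the device's sysfs directory:
 *
 *	echo 0 > addr_idx
 *	echo 0x408000 0x409000 > addr_range
 */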
1023 
1024 static ssize_t addr_start_show(struct device *dev,
1025 			       struct device_attribute *attr,
1026 			       char *buf)
1027 {
1028 	u8 idx;
1029 	unsigned long val;
1030 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1031 	struct etmv4_config *config = &drvdata->config;
1032 
1033 	spin_lock(&drvdata->spinlock);
1034 	idx = config->addr_idx;
1035 
1036 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1037 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1038 		spin_unlock(&drvdata->spinlock);
1039 		return -EPERM;
1040 	}
1041 
1042 	val = (unsigned long)config->addr_val[idx];
1043 	spin_unlock(&drvdata->spinlock);
1044 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1045 }
1046 
1047 static ssize_t addr_start_store(struct device *dev,
1048 				struct device_attribute *attr,
1049 				const char *buf, size_t size)
1050 {
1051 	u8 idx;
1052 	unsigned long val;
1053 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1054 	struct etmv4_config *config = &drvdata->config;
1055 
1056 	if (kstrtoul(buf, 16, &val))
1057 		return -EINVAL;
1058 
1059 	spin_lock(&drvdata->spinlock);
1060 	idx = config->addr_idx;
1061 	if (!drvdata->nr_addr_cmp) {
1062 		spin_unlock(&drvdata->spinlock);
1063 		return -EINVAL;
1064 	}
1065 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1066 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1067 		spin_unlock(&drvdata->spinlock);
1068 		return -EPERM;
1069 	}
1070 
1071 	config->addr_val[idx] = (u64)val;
1072 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1073 	config->vissctlr |= BIT(idx);
1074 	/* SSSTATUS, bit[9] - turn on start/stop logic */
1075 	config->vinst_ctrl |= BIT(9);
1076 	spin_unlock(&drvdata->spinlock);
1077 	return size;
1078 }
1079 static DEVICE_ATTR_RW(addr_start);
1080 
1081 static ssize_t addr_stop_show(struct device *dev,
1082 			      struct device_attribute *attr,
1083 			      char *buf)
1084 {
1085 	u8 idx;
1086 	unsigned long val;
1087 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1088 	struct etmv4_config *config = &drvdata->config;
1089 
1090 	spin_lock(&drvdata->spinlock);
1091 	idx = config->addr_idx;
1092 
1093 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1094 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1095 		spin_unlock(&drvdata->spinlock);
1096 		return -EPERM;
1097 	}
1098 
1099 	val = (unsigned long)config->addr_val[idx];
1100 	spin_unlock(&drvdata->spinlock);
1101 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1102 }
1103 
1104 static ssize_t addr_stop_store(struct device *dev,
1105 			       struct device_attribute *attr,
1106 			       const char *buf, size_t size)
1107 {
1108 	u8 idx;
1109 	unsigned long val;
1110 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1111 	struct etmv4_config *config = &drvdata->config;
1112 
1113 	if (kstrtoul(buf, 16, &val))
1114 		return -EINVAL;
1115 
1116 	spin_lock(&drvdata->spinlock);
1117 	idx = config->addr_idx;
1118 	if (!drvdata->nr_addr_cmp) {
1119 		spin_unlock(&drvdata->spinlock);
1120 		return -EINVAL;
1121 	}
1122 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1123 	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1124 		spin_unlock(&drvdata->spinlock);
1125 		return -EPERM;
1126 	}
1127 
1128 	config->addr_val[idx] = (u64)val;
1129 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1130 	config->vissctlr |= BIT(idx + 16);
1131 	/* SSSTATUS, bit[9] - turn on start/stop logic */
1132 	config->vinst_ctrl |= BIT(9);
1133 	spin_unlock(&drvdata->spinlock);
1134 	return size;
1135 }
1136 static DEVICE_ATTR_RW(addr_stop);
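
/*
 * addr_start/addr_stop program the ViewInst start-stop control: writing an
 * address marks the selected comparator as a start (or stop) point in
 * vissctlr and turns on the start/stop logic (SSSTATUS).  Illustrative
 * example using two different comparators:
 *
 *	echo 0 > addr_idx; echo 0x408000 > addr_start
 *	echo 1 > addr_idx; echo 0x409000 > addr_stop
 */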
1137 
1138 static ssize_t addr_ctxtype_show(struct device *dev,
1139 				 struct device_attribute *attr,
1140 				 char *buf)
1141 {
1142 	ssize_t len;
1143 	u8 idx, val;
1144 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1145 	struct etmv4_config *config = &drvdata->config;
1146 
1147 	spin_lock(&drvdata->spinlock);
1148 	idx = config->addr_idx;
1149 	/* CONTEXTTYPE, bits[3:2] */
1150 	val = BMVAL(config->addr_acc[idx], 2, 3);
1151 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1152 			(val == ETM_CTX_CTXID ? "ctxid" :
1153 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1154 	spin_unlock(&drvdata->spinlock);
1155 	return len;
1156 }
1157 
1158 static ssize_t addr_ctxtype_store(struct device *dev,
1159 				  struct device_attribute *attr,
1160 				  const char *buf, size_t size)
1161 {
1162 	u8 idx;
1163 	char str[10] = "";
1164 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1165 	struct etmv4_config *config = &drvdata->config;
1166 
1167 	if (strlen(buf) >= 10)
1168 		return -EINVAL;
1169 	if (sscanf(buf, "%s", str) != 1)
1170 		return -EINVAL;
1171 
1172 	spin_lock(&drvdata->spinlock);
1173 	idx = config->addr_idx;
1174 	if (!strcmp(str, "none"))
1175 		/* start by clearing context type bits */
1176 		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1177 	else if (!strcmp(str, "ctxid")) {
1178 		/* 0b01 The trace unit performs a Context ID comparison */
1179 		if (drvdata->numcidc) {
1180 			config->addr_acc[idx] |= BIT(2);
1181 			config->addr_acc[idx] &= ~BIT(3);
1182 		}
1183 	} else if (!strcmp(str, "vmid")) {
1184 		/* 0b10 The trace unit performs a VMID comparison */
1185 		if (drvdata->numvmidc) {
1186 			config->addr_acc[idx] &= ~BIT(2);
1187 			config->addr_acc[idx] |= BIT(3);
1188 		}
1189 	} else if (!strcmp(str, "all")) {
1190 		/*
1191 		 * 0b11 The trace unit performs a Context ID
1192 		 * comparison and a VMID comparison
1193 		 */
1194 		if (drvdata->numcidc)
1195 			config->addr_acc[idx] |= BIT(2);
1196 		if (drvdata->numvmidc)
1197 			config->addr_acc[idx] |= BIT(3);
1198 	}
1199 	spin_unlock(&drvdata->spinlock);
1200 	return size;
1201 }
1202 static DEVICE_ATTR_RW(addr_ctxtype);
1203 
1204 static ssize_t addr_context_show(struct device *dev,
1205 				 struct device_attribute *attr,
1206 				 char *buf)
1207 {
1208 	u8 idx;
1209 	unsigned long val;
1210 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1211 	struct etmv4_config *config = &drvdata->config;
1212 
1213 	spin_lock(&drvdata->spinlock);
1214 	idx = config->addr_idx;
1215 	/* context ID comparator bits[6:4] */
1216 	val = BMVAL(config->addr_acc[idx], 4, 6);
1217 	spin_unlock(&drvdata->spinlock);
1218 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1219 }
1220 
1221 static ssize_t addr_context_store(struct device *dev,
1222 				  struct device_attribute *attr,
1223 				  const char *buf, size_t size)
1224 {
1225 	u8 idx;
1226 	unsigned long val;
1227 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1228 	struct etmv4_config *config = &drvdata->config;
1229 
1230 	if (kstrtoul(buf, 16, &val))
1231 		return -EINVAL;
1232 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1233 		return -EINVAL;
1234 	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
1235 		     drvdata->numcidc : drvdata->numvmidc))
1236 		return -EINVAL;
1237 
1238 	spin_lock(&drvdata->spinlock);
1239 	idx = config->addr_idx;
1240 	/* clear context ID comparator bits[6:4] */
1241 	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1242 	config->addr_acc[idx] |= (val << 4);
1243 	spin_unlock(&drvdata->spinlock);
1244 	return size;
1245 }
1246 static DEVICE_ATTR_RW(addr_context);
1247 
1248 static ssize_t seq_idx_show(struct device *dev,
1249 			    struct device_attribute *attr,
1250 			    char *buf)
1251 {
1252 	unsigned long val;
1253 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1254 	struct etmv4_config *config = &drvdata->config;
1255 
1256 	val = config->seq_idx;
1257 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1258 }
1259 
1260 static ssize_t seq_idx_store(struct device *dev,
1261 			     struct device_attribute *attr,
1262 			     const char *buf, size_t size)
1263 {
1264 	unsigned long val;
1265 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1266 	struct etmv4_config *config = &drvdata->config;
1267 
1268 	if (kstrtoul(buf, 16, &val))
1269 		return -EINVAL;
1270 	if (val >= drvdata->nrseqstate - 1)
1271 		return -EINVAL;
1272 
1273 	/*
1274 	 * Use spinlock to ensure index doesn't change while it gets
1275 	 * dereferenced multiple times within a spinlock block elsewhere.
1276 	 */
1277 	spin_lock(&drvdata->spinlock);
1278 	config->seq_idx = val;
1279 	spin_unlock(&drvdata->spinlock);
1280 	return size;
1281 }
1282 static DEVICE_ATTR_RW(seq_idx);
1283 
1284 static ssize_t seq_state_show(struct device *dev,
1285 			      struct device_attribute *attr,
1286 			      char *buf)
1287 {
1288 	unsigned long val;
1289 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1290 	struct etmv4_config *config = &drvdata->config;
1291 
1292 	val = config->seq_state;
1293 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1294 }
1295 
1296 static ssize_t seq_state_store(struct device *dev,
1297 			       struct device_attribute *attr,
1298 			       const char *buf, size_t size)
1299 {
1300 	unsigned long val;
1301 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1302 	struct etmv4_config *config = &drvdata->config;
1303 
1304 	if (kstrtoul(buf, 16, &val))
1305 		return -EINVAL;
1306 	if (val >= drvdata->nrseqstate)
1307 		return -EINVAL;
1308 
1309 	config->seq_state = val;
1310 	return size;
1311 }
1312 static DEVICE_ATTR_RW(seq_state);
1313 
1314 static ssize_t seq_event_show(struct device *dev,
1315 			      struct device_attribute *attr,
1316 			      char *buf)
1317 {
1318 	u8 idx;
1319 	unsigned long val;
1320 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1321 	struct etmv4_config *config = &drvdata->config;
1322 
1323 	spin_lock(&drvdata->spinlock);
1324 	idx = config->seq_idx;
1325 	val = config->seq_ctrl[idx];
1326 	spin_unlock(&drvdata->spinlock);
1327 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1328 }
1329 
1330 static ssize_t seq_event_store(struct device *dev,
1331 			       struct device_attribute *attr,
1332 			       const char *buf, size_t size)
1333 {
1334 	u8 idx;
1335 	unsigned long val;
1336 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1337 	struct etmv4_config *config = &drvdata->config;
1338 
1339 	if (kstrtoul(buf, 16, &val))
1340 		return -EINVAL;
1341 
1342 	spin_lock(&drvdata->spinlock);
1343 	idx = config->seq_idx;
1344 	/* RST, bits[7:0] */
1345 	config->seq_ctrl[idx] = val & 0xFF;
1346 	spin_unlock(&drvdata->spinlock);
1347 	return size;
1348 }
1349 static DEVICE_ATTR_RW(seq_event);
1350 
1351 static ssize_t seq_reset_event_show(struct device *dev,
1352 				    struct device_attribute *attr,
1353 				    char *buf)
1354 {
1355 	unsigned long val;
1356 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1357 	struct etmv4_config *config = &drvdata->config;
1358 
1359 	val = config->seq_rst;
1360 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1361 }
1362 
1363 static ssize_t seq_reset_event_store(struct device *dev,
1364 				     struct device_attribute *attr,
1365 				     const char *buf, size_t size)
1366 {
1367 	unsigned long val;
1368 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1369 	struct etmv4_config *config = &drvdata->config;
1370 
1371 	if (kstrtoul(buf, 16, &val))
1372 		return -EINVAL;
1373 	if (!drvdata->nrseqstate)
1374 		return -EINVAL;
1375 
1376 	config->seq_rst = val & ETMv4_EVENT_MASK;
1377 	return size;
1378 }
1379 static DEVICE_ATTR_RW(seq_reset_event);
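
/*
 * The sequencer attributes follow the same select-then-program pattern:
 * 'seq_idx' picks one of the nrseqstate - 1 control registers and
 * 'seq_event' programs it, while 'seq_state' and 'seq_reset_event' act on
 * the sequencer as a whole.  Illustrative example from the device's sysfs
 * directory:
 *
 *	echo 0 > seq_idx
 *	echo 0x12 > seq_event
 */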
1380 
1381 static ssize_t cntr_idx_show(struct device *dev,
1382 			     struct device_attribute *attr,
1383 			     char *buf)
1384 {
1385 	unsigned long val;
1386 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1387 	struct etmv4_config *config = &drvdata->config;
1388 
1389 	val = config->cntr_idx;
1390 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1391 }
1392 
1393 static ssize_t cntr_idx_store(struct device *dev,
1394 			      struct device_attribute *attr,
1395 			      const char *buf, size_t size)
1396 {
1397 	unsigned long val;
1398 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1399 	struct etmv4_config *config = &drvdata->config;
1400 
1401 	if (kstrtoul(buf, 16, &val))
1402 		return -EINVAL;
1403 	if (val >= drvdata->nr_cntr)
1404 		return -EINVAL;
1405 
1406 	/*
1407 	 * Use spinlock to ensure index doesn't change while it gets
1408 	 * dereferenced multiple times within a spinlock block elsewhere.
1409 	 */
1410 	spin_lock(&drvdata->spinlock);
1411 	config->cntr_idx = val;
1412 	spin_unlock(&drvdata->spinlock);
1413 	return size;
1414 }
1415 static DEVICE_ATTR_RW(cntr_idx);
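
/*
 * Counter programming also goes through an index: 'cntr_idx' selects the
 * counter, then 'cntrldvr' (reload value), 'cntr_val' and 'cntr_ctrl' apply
 * to it.  Illustrative example from the device's sysfs directory:
 *
 *	echo 0 > cntr_idx
 *	echo 0x64 > cntrldvr
 */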
1416 
1417 static ssize_t cntrldvr_show(struct device *dev,
1418 			     struct device_attribute *attr,
1419 			     char *buf)
1420 {
1421 	u8 idx;
1422 	unsigned long val;
1423 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1424 	struct etmv4_config *config = &drvdata->config;
1425 
1426 	spin_lock(&drvdata->spinlock);
1427 	idx = config->cntr_idx;
1428 	val = config->cntrldvr[idx];
1429 	spin_unlock(&drvdata->spinlock);
1430 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1431 }
1432 
1433 static ssize_t cntrldvr_store(struct device *dev,
1434 			      struct device_attribute *attr,
1435 			      const char *buf, size_t size)
1436 {
1437 	u8 idx;
1438 	unsigned long val;
1439 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1440 	struct etmv4_config *config = &drvdata->config;
1441 
1442 	if (kstrtoul(buf, 16, &val))
1443 		return -EINVAL;
1444 	if (val > ETM_CNTR_MAX_VAL)
1445 		return -EINVAL;
1446 
1447 	spin_lock(&drvdata->spinlock);
1448 	idx = config->cntr_idx;
1449 	config->cntrldvr[idx] = val;
1450 	spin_unlock(&drvdata->spinlock);
1451 	return size;
1452 }
1453 static DEVICE_ATTR_RW(cntrldvr);
1454 
1455 static ssize_t cntr_val_show(struct device *dev,
1456 			     struct device_attribute *attr,
1457 			     char *buf)
1458 {
1459 	u8 idx;
1460 	unsigned long val;
1461 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1462 	struct etmv4_config *config = &drvdata->config;
1463 
1464 	spin_lock(&drvdata->spinlock);
1465 	idx = config->cntr_idx;
1466 	val = config->cntr_val[idx];
1467 	spin_unlock(&drvdata->spinlock);
1468 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1469 }
1470 
1471 static ssize_t cntr_val_store(struct device *dev,
1472 			      struct device_attribute *attr,
1473 			      const char *buf, size_t size)
1474 {
1475 	u8 idx;
1476 	unsigned long val;
1477 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1478 	struct etmv4_config *config = &drvdata->config;
1479 
1480 	if (kstrtoul(buf, 16, &val))
1481 		return -EINVAL;
1482 	if (val > ETM_CNTR_MAX_VAL)
1483 		return -EINVAL;
1484 
1485 	spin_lock(&drvdata->spinlock);
1486 	idx = config->cntr_idx;
1487 	config->cntr_val[idx] = val;
1488 	spin_unlock(&drvdata->spinlock);
1489 	return size;
1490 }
1491 static DEVICE_ATTR_RW(cntr_val);
1492 
1493 static ssize_t cntr_ctrl_show(struct device *dev,
1494 			      struct device_attribute *attr,
1495 			      char *buf)
1496 {
1497 	u8 idx;
1498 	unsigned long val;
1499 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1500 	struct etmv4_config *config = &drvdata->config;
1501 
1502 	spin_lock(&drvdata->spinlock);
1503 	idx = config->cntr_idx;
1504 	val = config->cntr_ctrl[idx];
1505 	spin_unlock(&drvdata->spinlock);
1506 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1507 }
1508 
1509 static ssize_t cntr_ctrl_store(struct device *dev,
1510 			       struct device_attribute *attr,
1511 			       const char *buf, size_t size)
1512 {
1513 	u8 idx;
1514 	unsigned long val;
1515 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1516 	struct etmv4_config *config = &drvdata->config;
1517 
1518 	if (kstrtoul(buf, 16, &val))
1519 		return -EINVAL;
1520 
1521 	spin_lock(&drvdata->spinlock);
1522 	idx = config->cntr_idx;
1523 	config->cntr_ctrl[idx] = val;
1524 	spin_unlock(&drvdata->spinlock);
1525 	return size;
1526 }
1527 static DEVICE_ATTR_RW(cntr_ctrl);
1528 
1529 static ssize_t res_idx_show(struct device *dev,
1530 			    struct device_attribute *attr,
1531 			    char *buf)
1532 {
1533 	unsigned long val;
1534 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1535 	struct etmv4_config *config = &drvdata->config;
1536 
1537 	val = config->res_idx;
1538 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1539 }
1540 
1541 static ssize_t res_idx_store(struct device *dev,
1542 			     struct device_attribute *attr,
1543 			     const char *buf, size_t size)
1544 {
1545 	unsigned long val;
1546 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1547 	struct etmv4_config *config = &drvdata->config;
1548 
1549 	if (kstrtoul(buf, 16, &val))
1550 		return -EINVAL;
1551 	/* Resource selector pair 0 is always implemented and reserved */
1552 	if ((val == 0) || (val >= drvdata->nr_resource))
1553 		return -EINVAL;
1554 
1555 	/*
1556 	 * Use spinlock to ensure index doesn't change while it gets
1557 	 * dereferenced multiple times within a spinlock block elsewhere.
1558 	 */
1559 	spin_lock(&drvdata->spinlock);
1560 	config->res_idx = val;
1561 	spin_unlock(&drvdata->spinlock);
1562 	return size;
1563 }
1564 static DEVICE_ATTR_RW(res_idx);
1565 
1566 static ssize_t res_ctrl_show(struct device *dev,
1567 			     struct device_attribute *attr,
1568 			     char *buf)
1569 {
1570 	u8 idx;
1571 	unsigned long val;
1572 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1573 	struct etmv4_config *config = &drvdata->config;
1574 
1575 	spin_lock(&drvdata->spinlock);
1576 	idx = config->res_idx;
1577 	val = config->res_ctrl[idx];
1578 	spin_unlock(&drvdata->spinlock);
1579 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1580 }
1581 
1582 static ssize_t res_ctrl_store(struct device *dev,
1583 			      struct device_attribute *attr,
1584 			      const char *buf, size_t size)
1585 {
1586 	u8 idx;
1587 	unsigned long val;
1588 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1589 	struct etmv4_config *config = &drvdata->config;
1590 
1591 	if (kstrtoul(buf, 16, &val))
1592 		return -EINVAL;
1593 
1594 	spin_lock(&drvdata->spinlock);
1595 	idx = config->res_idx;
1596 	/* For an odd idx, the pair inversion (PAIRINV) bit is RES0 */
1597 	if (idx % 2 != 0)
1598 		/* PAIRINV, bit[21] */
1599 		val &= ~BIT(21);
1600 	config->res_ctrl[idx] = val;
1601 	spin_unlock(&drvdata->spinlock);
1602 	return size;
1603 }
1604 static DEVICE_ATTR_RW(res_ctrl);
1605 
1606 static ssize_t ctxid_idx_show(struct device *dev,
1607 			      struct device_attribute *attr,
1608 			      char *buf)
1609 {
1610 	unsigned long val;
1611 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1612 	struct etmv4_config *config = &drvdata->config;
1613 
1614 	val = config->ctxid_idx;
1615 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1616 }
1617 
1618 static ssize_t ctxid_idx_store(struct device *dev,
1619 			       struct device_attribute *attr,
1620 			       const char *buf, size_t size)
1621 {
1622 	unsigned long val;
1623 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1624 	struct etmv4_config *config = &drvdata->config;
1625 
1626 	if (kstrtoul(buf, 16, &val))
1627 		return -EINVAL;
1628 	if (val >= drvdata->numcidc)
1629 		return -EINVAL;
1630 
1631 	/*
1632 	 * Use spinlock to ensure index doesn't change while it gets
1633 	 * dereferenced multiple times within a spinlock block elsewhere.
1634 	 */
1635 	spin_lock(&drvdata->spinlock);
1636 	config->ctxid_idx = val;
1637 	spin_unlock(&drvdata->spinlock);
1638 	return size;
1639 }
1640 static DEVICE_ATTR_RW(ctxid_idx);
1641 
1642 static ssize_t ctxid_pid_show(struct device *dev,
1643 			      struct device_attribute *attr,
1644 			      char *buf)
1645 {
1646 	u8 idx;
1647 	unsigned long val;
1648 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1649 	struct etmv4_config *config = &drvdata->config;
1650 
1651 	spin_lock(&drvdata->spinlock);
1652 	idx = config->ctxid_idx;
1653 	val = (unsigned long)config->ctxid_vpid[idx];
1654 	spin_unlock(&drvdata->spinlock);
1655 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1656 }
1657 
1658 static ssize_t ctxid_pid_store(struct device *dev,
1659 			       struct device_attribute *attr,
1660 			       const char *buf, size_t size)
1661 {
1662 	u8 idx;
1663 	unsigned long vpid, pid;
1664 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1665 	struct etmv4_config *config = &drvdata->config;
1666 
1667 	/*
1668 	 * only implemented when ctxid tracing is enabled, i.e. at least one
1669 	 * ctxid comparator is implemented and the ctxid size is greater
1670 	 * than 0 bits
1671 	 */
1672 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1673 		return -EINVAL;
1674 	if (kstrtoul(buf, 16, &vpid))
1675 		return -EINVAL;
1676 
1677 	pid = coresight_vpid_to_pid(vpid);
1678 
1679 	spin_lock(&drvdata->spinlock);
1680 	idx = config->ctxid_idx;
1681 	config->ctxid_pid[idx] = (u64)pid;
1682 	config->ctxid_vpid[idx] = (u64)vpid;
1683 	spin_unlock(&drvdata->spinlock);
1684 	return size;
1685 }
1686 static DEVICE_ATTR_RW(ctxid_pid);
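
/*
 * 'ctxid_pid' takes a PID in hex; the value is translated from the caller's
 * PID namespace with coresight_vpid_to_pid() before being programmed into
 * the selected comparator.  Illustrative example (PID 256):
 *
 *	echo 0 > ctxid_idx
 *	echo 0x100 > ctxid_pid
 */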
1687 
1688 static ssize_t ctxid_masks_show(struct device *dev,
1689 				struct device_attribute *attr,
1690 				char *buf)
1691 {
1692 	unsigned long val1, val2;
1693 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1694 	struct etmv4_config *config = &drvdata->config;
1695 
1696 	spin_lock(&drvdata->spinlock);
1697 	val1 = config->ctxid_mask0;
1698 	val2 = config->ctxid_mask1;
1699 	spin_unlock(&drvdata->spinlock);
1700 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1701 }
1702 
1703 static ssize_t ctxid_masks_store(struct device *dev,
1704 				struct device_attribute *attr,
1705 				const char *buf, size_t size)
1706 {
1707 	u8 i, j, maskbyte;
1708 	unsigned long val1, val2, mask;
1709 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1710 	struct etmv4_config *config = &drvdata->config;
1711 
1712 	/*
1713 	 * only implemented when ctxid tracing is enabled, i.e. at least one
1714 	 * ctxid comparator is implemented and the ctxid size is greater
1715 	 * than 0 bits
1716 	 */
1717 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1718 		return -EINVAL;
1719 	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1720 		return -EINVAL;
1721 
1722 	spin_lock(&drvdata->spinlock);
1723 	/*
1724 	 * each byte[0..3] controls mask value applied to ctxid
1725 	 * comparator[0..3]
1726 	 */
1727 	switch (drvdata->numcidc) {
1728 	case 0x1:
1729 		/* COMP0, bits[7:0] */
1730 		config->ctxid_mask0 = val1 & 0xFF;
1731 		break;
1732 	case 0x2:
1733 		/* COMP1, bits[15:8] */
1734 		config->ctxid_mask0 = val1 & 0xFFFF;
1735 		break;
1736 	case 0x3:
1737 		/* COMP2, bits[23:16] */
1738 		config->ctxid_mask0 = val1 & 0xFFFFFF;
1739 		break;
1740 	case 0x4:
1741 		 /* COMP3, bits[31:24] */
1742 		config->ctxid_mask0 = val1;
1743 		break;
1744 	case 0x5:
1745 		/* COMP4, bits[7:0] */
1746 		config->ctxid_mask0 = val1;
1747 		config->ctxid_mask1 = val2 & 0xFF;
1748 		break;
1749 	case 0x6:
1750 		/* COMP5, bits[15:8] */
1751 		config->ctxid_mask0 = val1;
1752 		config->ctxid_mask1 = val2 & 0xFFFF;
1753 		break;
1754 	case 0x7:
1755 		/* COMP6, bits[23:16] */
1756 		config->ctxid_mask0 = val1;
1757 		config->ctxid_mask1 = val2 & 0xFFFFFF;
1758 		break;
1759 	case 0x8:
1760 		/* COMP7, bits[31:24] */
1761 		config->ctxid_mask0 = val1;
1762 		config->ctxid_mask1 = val2;
1763 		break;
1764 	default:
1765 		break;
1766 	}
1767 	/*
1768 	 * If software sets a mask bit to 1, it must program the relevant byte
1769 	 * of the ctxid comparator value to 0x0, otherwise the behavior is
1770 	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
1771 	 * clear bits[31:24] (byte 3) of the ctxid comparator0 value register.
1772 	 */
1773 	mask = config->ctxid_mask0;
1774 	for (i = 0; i < drvdata->numcidc; i++) {
1775 		/* mask value of corresponding ctxid comparator */
1776 		maskbyte = mask & ETMv4_EVENT_MASK;
1777 		/*
1778 		 * each bit corresponds to a byte of respective ctxid comparator
1779 		 * value register
1780 		 */
1781 		for (j = 0; j < 8; j++) {
1782 			if (maskbyte & 1)
1783 				config->ctxid_pid[i] &= ~(0xFF << (j * 8));
1784 			maskbyte >>= 1;
1785 		}
1786 		/* Select the next ctxid comparator mask value */
1787 		if (i == 3)
1788 			/* ctxid comparators[4-7] */
1789 			mask = config->ctxid_mask1;
1790 		else
1791 			mask >>= 0x8;
1792 	}
1793 
1794 	spin_unlock(&drvdata->spinlock);
1795 	return size;
1796 }
1797 static DEVICE_ATTR_RW(ctxid_masks);
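
/*
 * 'ctxid_masks' expects two hex words, one mask byte per implemented ctxid
 * comparator (comparators 0-3 in the first word, 4-7 in the second); each
 * set bit in a comparator's mask byte clears the matching byte of that
 * comparator's value, as described above.  Illustrative example:
 *
 *	echo 0x1 0x0 > ctxid_masks
 */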
1798 
1799 static ssize_t vmid_idx_show(struct device *dev,
1800 			     struct device_attribute *attr,
1801 			     char *buf)
1802 {
1803 	unsigned long val;
1804 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1805 	struct etmv4_config *config = &drvdata->config;
1806 
1807 	val = config->vmid_idx;
1808 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1809 }
1810 
1811 static ssize_t vmid_idx_store(struct device *dev,
1812 			      struct device_attribute *attr,
1813 			      const char *buf, size_t size)
1814 {
1815 	unsigned long val;
1816 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1817 	struct etmv4_config *config = &drvdata->config;
1818 
1819 	if (kstrtoul(buf, 16, &val))
1820 		return -EINVAL;
1821 	if (val >= drvdata->numvmidc)
1822 		return -EINVAL;
1823 
1824 	/*
1825 	 * Use spinlock to ensure index doesn't change while it gets
1826 	 * dereferenced multiple times within a spinlock block elsewhere.
1827 	 */
1828 	spin_lock(&drvdata->spinlock);
1829 	config->vmid_idx = val;
1830 	spin_unlock(&drvdata->spinlock);
1831 	return size;
1832 }
1833 static DEVICE_ATTR_RW(vmid_idx);
1834 
1835 static ssize_t vmid_val_show(struct device *dev,
1836 			     struct device_attribute *attr,
1837 			     char *buf)
1838 {
1839 	unsigned long val;
1840 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1841 	struct etmv4_config *config = &drvdata->config;
1842 
1843 	val = (unsigned long)config->vmid_val[config->vmid_idx];
1844 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1845 }
1846 
1847 static ssize_t vmid_val_store(struct device *dev,
1848 			      struct device_attribute *attr,
1849 			      const char *buf, size_t size)
1850 {
1851 	unsigned long val;
1852 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1853 	struct etmv4_config *config = &drvdata->config;
1854 
1855 	/*
1856 	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least 8 bits
1858 	 */
1859 	if (!drvdata->vmid_size || !drvdata->numvmidc)
1860 		return -EINVAL;
1861 	if (kstrtoul(buf, 16, &val))
1862 		return -EINVAL;
1863 
1864 	spin_lock(&drvdata->spinlock);
1865 	config->vmid_val[config->vmid_idx] = (u64)val;
1866 	spin_unlock(&drvdata->spinlock);
1867 	return size;
1868 }
1869 static DEVICE_ATTR_RW(vmid_val);
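
/*
 * vmid_idx and vmid_val work as a pair: vmid_idx selects which of the
 * numvmidc comparators a subsequent vmid_val access refers to.  A sketch
 * of the expected flow (hypothetical device name etm0, both values are
 * parsed as hex):
 *
 *   echo 0x1 > /sys/bus/coresight/devices/etm0/vmid_idx
 *   echo 0xbeef > /sys/bus/coresight/devices/etm0/vmid_val
 *
 * which stores 0xbeef in config->vmid_val[1].
 */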
1870 
1871 static ssize_t vmid_masks_show(struct device *dev,
1872 			       struct device_attribute *attr, char *buf)
1873 {
1874 	unsigned long val1, val2;
1875 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1876 	struct etmv4_config *config = &drvdata->config;
1877 
1878 	spin_lock(&drvdata->spinlock);
1879 	val1 = config->vmid_mask0;
1880 	val2 = config->vmid_mask1;
1881 	spin_unlock(&drvdata->spinlock);
1882 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1883 }
1884 
1885 static ssize_t vmid_masks_store(struct device *dev,
1886 				struct device_attribute *attr,
1887 				const char *buf, size_t size)
1888 {
1889 	u8 i, j, maskbyte;
1890 	unsigned long val1, val2, mask;
1891 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1892 	struct etmv4_config *config = &drvdata->config;
1893 
1894 	/*
1895 	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least 8 bits
1897 	 */
1898 	if (!drvdata->vmid_size || !drvdata->numvmidc)
1899 		return -EINVAL;
1900 	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1901 		return -EINVAL;
1902 
1903 	spin_lock(&drvdata->spinlock);
1904 
	/*
	 * each byte[0..3] of val1 controls the mask applied to vmid
	 * comparator[0..3]; each byte[0..3] of val2 controls vmid
	 * comparator[4..7]
	 */
1909 	switch (drvdata->numvmidc) {
1910 	case 0x1:
1911 		/* COMP0, bits[7:0] */
1912 		config->vmid_mask0 = val1 & 0xFF;
1913 		break;
1914 	case 0x2:
1915 		/* COMP1, bits[15:8] */
1916 		config->vmid_mask0 = val1 & 0xFFFF;
1917 		break;
1918 	case 0x3:
1919 		/* COMP2, bits[23:16] */
1920 		config->vmid_mask0 = val1 & 0xFFFFFF;
1921 		break;
1922 	case 0x4:
1923 		/* COMP3, bits[31:24] */
1924 		config->vmid_mask0 = val1;
1925 		break;
1926 	case 0x5:
1927 		/* COMP4, bits[7:0] */
1928 		config->vmid_mask0 = val1;
1929 		config->vmid_mask1 = val2 & 0xFF;
1930 		break;
1931 	case 0x6:
1932 		/* COMP5, bits[15:8] */
1933 		config->vmid_mask0 = val1;
1934 		config->vmid_mask1 = val2 & 0xFFFF;
1935 		break;
1936 	case 0x7:
1937 		/* COMP6, bits[23:16] */
1938 		config->vmid_mask0 = val1;
1939 		config->vmid_mask1 = val2 & 0xFFFFFF;
1940 		break;
1941 	case 0x8:
1942 		/* COMP7, bits[31:24] */
1943 		config->vmid_mask0 = val1;
1944 		config->vmid_mask1 = val2;
1945 		break;
1946 	default:
1947 		break;
1948 	}
1949 
	/*
	 * If software sets a mask bit to 1, it must also program the
	 * corresponding byte of the vmid comparator value to 0x0, otherwise
	 * the behavior is unpredictable.  For example, if bit[3] of
	 * vmid_mask0 is 1 (mask byte 0 covers comparator 0), we must clear
	 * bits[31:24], i.e. byte 3, of the vmid comparator 0 value register.
	 */
1956 	mask = config->vmid_mask0;
1957 	for (i = 0; i < drvdata->numvmidc; i++) {
1958 		/* mask value of corresponding vmid comparator */
1959 		maskbyte = mask & ETMv4_EVENT_MASK;
1960 		/*
1961 		 * each bit corresponds to a byte of respective vmid comparator
1962 		 * value register
1963 		 */
1964 		for (j = 0; j < 8; j++) {
1965 			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFULL << (j * 8));
1967 			maskbyte >>= 1;
1968 		}
1969 		/* Select the next vmid comparator mask value */
1970 		if (i == 3)
1971 			/* vmid comparators[4-7] */
1972 			mask = config->vmid_mask1;
1973 		else
1974 			mask >>= 0x8;
1975 	}
1976 	spin_unlock(&drvdata->spinlock);
1977 	return size;
1978 }
1979 static DEVICE_ATTR_RW(vmid_masks);
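
/*
 * The vmid attributes mirror the ctxid ones byte for byte.  Reading
 * vmid_masks returns both mask registers on one line, for example
 * (values illustrative, device name hypothetical):
 *
 *   cat /sys/bus/coresight/devices/etm0/vmid_masks
 *   0xff 0
 */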
1980 
1981 static ssize_t cpu_show(struct device *dev,
1982 			struct device_attribute *attr, char *buf)
1983 {
1984 	int val;
1985 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1986 
1987 	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
1991 static DEVICE_ATTR_RO(cpu);
1992 
1993 static struct attribute *coresight_etmv4_attrs[] = {
1994 	&dev_attr_nr_pe_cmp.attr,
1995 	&dev_attr_nr_addr_cmp.attr,
1996 	&dev_attr_nr_cntr.attr,
1997 	&dev_attr_nr_ext_inp.attr,
1998 	&dev_attr_numcidc.attr,
1999 	&dev_attr_numvmidc.attr,
2000 	&dev_attr_nrseqstate.attr,
2001 	&dev_attr_nr_resource.attr,
2002 	&dev_attr_nr_ss_cmp.attr,
2003 	&dev_attr_reset.attr,
2004 	&dev_attr_mode.attr,
2005 	&dev_attr_pe.attr,
2006 	&dev_attr_event.attr,
2007 	&dev_attr_event_instren.attr,
2008 	&dev_attr_event_ts.attr,
2009 	&dev_attr_syncfreq.attr,
2010 	&dev_attr_cyc_threshold.attr,
2011 	&dev_attr_bb_ctrl.attr,
2012 	&dev_attr_event_vinst.attr,
2013 	&dev_attr_s_exlevel_vinst.attr,
2014 	&dev_attr_ns_exlevel_vinst.attr,
2015 	&dev_attr_addr_idx.attr,
2016 	&dev_attr_addr_instdatatype.attr,
2017 	&dev_attr_addr_single.attr,
2018 	&dev_attr_addr_range.attr,
2019 	&dev_attr_addr_start.attr,
2020 	&dev_attr_addr_stop.attr,
2021 	&dev_attr_addr_ctxtype.attr,
2022 	&dev_attr_addr_context.attr,
2023 	&dev_attr_seq_idx.attr,
2024 	&dev_attr_seq_state.attr,
2025 	&dev_attr_seq_event.attr,
2026 	&dev_attr_seq_reset_event.attr,
2027 	&dev_attr_cntr_idx.attr,
2028 	&dev_attr_cntrldvr.attr,
2029 	&dev_attr_cntr_val.attr,
2030 	&dev_attr_cntr_ctrl.attr,
2031 	&dev_attr_res_idx.attr,
2032 	&dev_attr_res_ctrl.attr,
2033 	&dev_attr_ctxid_idx.attr,
2034 	&dev_attr_ctxid_pid.attr,
2035 	&dev_attr_ctxid_masks.attr,
2036 	&dev_attr_vmid_idx.attr,
2037 	&dev_attr_vmid_val.attr,
2038 	&dev_attr_vmid_masks.attr,
2039 	&dev_attr_cpu.attr,
2040 	NULL,
2041 };
2042 
2043 struct etmv4_reg {
2044 	void __iomem *addr;
2045 	u32 data;
2046 };
2047 
2048 static void do_smp_cross_read(void *data)
2049 {
2050 	struct etmv4_reg *reg = data;
2051 
2052 	reg->data = readl_relaxed(reg->addr);
2053 }
2054 
2055 static u32 etmv4_cross_read(const struct device *dev, u32 offset)
2056 {
2057 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
2058 	struct etmv4_reg reg;
2059 
2060 	reg.addr = drvdata->base + offset;
2061 	/*
2062 	 * smp cross call ensures the CPU will be powered up before
2063 	 * accessing the ETMv4 trace core registers
2064 	 */
2065 	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
2066 	return reg.data;
2067 }
2068 
2069 #define coresight_etm4x_reg(name, offset)			\
2070 	coresight_simple_reg32(struct etmv4_drvdata, name, offset)
2071 
2072 #define coresight_etm4x_cross_read(name, offset)			\
2073 	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
2074 			      name, offset)
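
/*
 * Each invocation below is expected to expand (via the helpers in
 * coresight-priv.h) to a read-only dev_attr_<name> whose show routine
 * returns the register at the given offset: coresight_etm4x_reg() reads
 * the memory-mapped register directly, while coresight_etm4x_cross_read()
 * routes the access through etmv4_cross_read() so it executes on the CPU
 * that owns the trace unit.
 */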
2075 
2076 coresight_etm4x_reg(trcpdcr, TRCPDCR);
2077 coresight_etm4x_reg(trcpdsr, TRCPDSR);
2078 coresight_etm4x_reg(trclsr, TRCLSR);
2079 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
2080 coresight_etm4x_reg(trcdevid, TRCDEVID);
2081 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
2082 coresight_etm4x_reg(trcpidr0, TRCPIDR0);
2083 coresight_etm4x_reg(trcpidr1, TRCPIDR1);
2084 coresight_etm4x_reg(trcpidr2, TRCPIDR2);
2085 coresight_etm4x_reg(trcpidr3, TRCPIDR3);
2086 coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
2087 coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
2088 coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
2089 
2090 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2091 	&dev_attr_trcoslsr.attr,
2092 	&dev_attr_trcpdcr.attr,
2093 	&dev_attr_trcpdsr.attr,
2094 	&dev_attr_trclsr.attr,
2095 	&dev_attr_trcconfig.attr,
2096 	&dev_attr_trctraceid.attr,
2097 	&dev_attr_trcauthstatus.attr,
2098 	&dev_attr_trcdevid.attr,
2099 	&dev_attr_trcdevtype.attr,
2100 	&dev_attr_trcpidr0.attr,
2101 	&dev_attr_trcpidr1.attr,
2102 	&dev_attr_trcpidr2.attr,
2103 	&dev_attr_trcpidr3.attr,
2104 	NULL,
2105 };
2106 
2107 coresight_etm4x_cross_read(trcidr0, TRCIDR0);
2108 coresight_etm4x_cross_read(trcidr1, TRCIDR1);
2109 coresight_etm4x_cross_read(trcidr2, TRCIDR2);
2110 coresight_etm4x_cross_read(trcidr3, TRCIDR3);
2111 coresight_etm4x_cross_read(trcidr4, TRCIDR4);
2112 coresight_etm4x_cross_read(trcidr5, TRCIDR5);
2113 /* trcidr[6,7] are reserved */
2114 coresight_etm4x_cross_read(trcidr8, TRCIDR8);
2115 coresight_etm4x_cross_read(trcidr9, TRCIDR9);
2116 coresight_etm4x_cross_read(trcidr10, TRCIDR10);
2117 coresight_etm4x_cross_read(trcidr11, TRCIDR11);
2118 coresight_etm4x_cross_read(trcidr12, TRCIDR12);
2119 coresight_etm4x_cross_read(trcidr13, TRCIDR13);
2120 
2121 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2122 	&dev_attr_trcidr0.attr,
2123 	&dev_attr_trcidr1.attr,
2124 	&dev_attr_trcidr2.attr,
2125 	&dev_attr_trcidr3.attr,
2126 	&dev_attr_trcidr4.attr,
2127 	&dev_attr_trcidr5.attr,
2128 	/* trcidr[6,7] are reserved */
2129 	&dev_attr_trcidr8.attr,
2130 	&dev_attr_trcidr9.attr,
2131 	&dev_attr_trcidr10.attr,
2132 	&dev_attr_trcidr11.attr,
2133 	&dev_attr_trcidr12.attr,
2134 	&dev_attr_trcidr13.attr,
2135 	NULL,
2136 };
2137 
2138 static const struct attribute_group coresight_etmv4_group = {
2139 	.attrs = coresight_etmv4_attrs,
2140 };
2141 
2142 static const struct attribute_group coresight_etmv4_mgmt_group = {
2143 	.attrs = coresight_etmv4_mgmt_attrs,
2144 	.name = "mgmt",
2145 };
2146 
2147 static const struct attribute_group coresight_etmv4_trcidr_group = {
2148 	.attrs = coresight_etmv4_trcidr_attrs,
2149 	.name = "trcidr",
2150 };
2151 
2152 const struct attribute_group *coresight_etmv4_groups[] = {
2153 	&coresight_etmv4_group,
2154 	&coresight_etmv4_mgmt_group,
2155 	&coresight_etmv4_trcidr_group,
2156 	NULL,
2157 };
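
/*
 * Because the mgmt and trcidr groups carry a .name, sysfs places their
 * attributes in like-named subdirectories of the device, so user space
 * sees something like (device name hypothetical):
 *
 *   /sys/bus/coresight/devices/etm0/mgmt/trctraceid
 *   /sys/bus/coresight/devices/etm0/trcidr/trcidr0
 *
 * while the unnamed coresight_etmv4_group attributes sit directly in the
 * device directory.
 */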
2158