1 /*
2  * Copyright(C) 2015 Linaro Limited. All rights reserved.
3  * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #include <linux/pm_runtime.h>
19 #include <linux/sysfs.h>
20 #include "coresight-etm4x.h"
21 
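/*
 * etm4_set_mode_exclude - program the ViewInst include/exclude control
 * @drvdata: trace unit this configuration applies to
 * @exclude: true to exclude the selected address range, false to include it
 *
 * Operates on the address comparator pair currently selected by
 * config->addr_idx.  Only instruction address comparators are affected; for
 * those the index must reference the even (lower) half of a pair programmed
 * as an address range, otherwise -EINVAL is returned.
 */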
22 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
23 {
24 	u8 idx;
25 	struct etmv4_config *config = &drvdata->config;
26 
27 	idx = config->addr_idx;
28 
29 	/*
30 	 * TRCACATRn.TYPE bit[1:0]: type of comparison
31 	 * the trace unit performs
32 	 */
33 	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
34 		if (idx % 2 != 0)
35 			return -EINVAL;
36 
37 		/*
38 		 * We are performing instruction address comparison. Set the
39 		 * relevant bit of ViewInst Include/Exclude Control register
40 		 * for corresponding address comparator pair.
41 		 */
42 		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
43 		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
44 			return -EINVAL;
45 
		if (exclude) {
47 			/*
48 			 * Set exclude bit and unset the include bit
49 			 * corresponding to comparator pair
50 			 */
51 			config->viiectlr |= BIT(idx / 2 + 16);
52 			config->viiectlr &= ~BIT(idx / 2);
53 		} else {
54 			/*
55 			 * Set include bit and unset exclude bit
56 			 * corresponding to comparator pair
57 			 */
58 			config->viiectlr |= BIT(idx / 2);
59 			config->viiectlr &= ~BIT(idx / 2 + 16);
60 		}
61 	}
62 	return 0;
63 }
64 
65 static ssize_t nr_pe_cmp_show(struct device *dev,
66 			      struct device_attribute *attr,
67 			      char *buf)
68 {
69 	unsigned long val;
70 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
71 
72 	val = drvdata->nr_pe_cmp;
73 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
74 }
75 static DEVICE_ATTR_RO(nr_pe_cmp);
76 
77 static ssize_t nr_addr_cmp_show(struct device *dev,
78 				struct device_attribute *attr,
79 				char *buf)
80 {
81 	unsigned long val;
82 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
83 
84 	val = drvdata->nr_addr_cmp;
85 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
86 }
87 static DEVICE_ATTR_RO(nr_addr_cmp);
88 
89 static ssize_t nr_cntr_show(struct device *dev,
90 			    struct device_attribute *attr,
91 			    char *buf)
92 {
93 	unsigned long val;
94 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
95 
96 	val = drvdata->nr_cntr;
97 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
98 }
99 static DEVICE_ATTR_RO(nr_cntr);
100 
101 static ssize_t nr_ext_inp_show(struct device *dev,
102 			       struct device_attribute *attr,
103 			       char *buf)
104 {
105 	unsigned long val;
106 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
107 
108 	val = drvdata->nr_ext_inp;
109 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
110 }
111 static DEVICE_ATTR_RO(nr_ext_inp);
112 
113 static ssize_t numcidc_show(struct device *dev,
114 			    struct device_attribute *attr,
115 			    char *buf)
116 {
117 	unsigned long val;
118 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
119 
120 	val = drvdata->numcidc;
121 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
122 }
123 static DEVICE_ATTR_RO(numcidc);
124 
125 static ssize_t numvmidc_show(struct device *dev,
126 			     struct device_attribute *attr,
127 			     char *buf)
128 {
129 	unsigned long val;
130 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
131 
132 	val = drvdata->numvmidc;
133 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
134 }
135 static DEVICE_ATTR_RO(numvmidc);
136 
137 static ssize_t nrseqstate_show(struct device *dev,
138 			       struct device_attribute *attr,
139 			       char *buf)
140 {
141 	unsigned long val;
142 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
143 
144 	val = drvdata->nrseqstate;
145 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
146 }
147 static DEVICE_ATTR_RO(nrseqstate);
148 
149 static ssize_t nr_resource_show(struct device *dev,
150 				struct device_attribute *attr,
151 				char *buf)
152 {
153 	unsigned long val;
154 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
155 
156 	val = drvdata->nr_resource;
157 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
158 }
159 static DEVICE_ATTR_RO(nr_resource);
160 
161 static ssize_t nr_ss_cmp_show(struct device *dev,
162 			      struct device_attribute *attr,
163 			      char *buf)
164 {
165 	unsigned long val;
166 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
167 
168 	val = drvdata->nr_ss_cmp;
169 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
170 }
171 static DEVICE_ATTR_RO(nr_ss_cmp);
172 
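/*
 * reset_store - restore the default trace configuration
 *
 * Clears the data trace, event, timestamp, stall, sequencer, counter,
 * resource, single-shot, address comparator, context ID and VMID settings,
 * and re-enables ViewInst to trace everything, with the start/stop logic
 * started where address comparators are implemented.  A non-zero value
 * additionally clears the mode bits.
 *
 * Illustrative usage (the sysfs path depends on the platform's device name):
 *   echo 1 > /sys/bus/coresight/devices/<etm-name>/reset
 */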
173 static ssize_t reset_store(struct device *dev,
174 			   struct device_attribute *attr,
175 			   const char *buf, size_t size)
176 {
177 	int i;
178 	unsigned long val;
179 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
180 	struct etmv4_config *config = &drvdata->config;
181 
182 	if (kstrtoul(buf, 16, &val))
183 		return -EINVAL;
184 
185 	spin_lock(&drvdata->spinlock);
186 	if (val)
187 		config->mode = 0x0;
188 
189 	/* Disable data tracing: do not trace load and store data transfers */
190 	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
191 	config->cfg &= ~(BIT(1) | BIT(2));
192 
193 	/* Disable data value and data address tracing */
194 	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
195 			   ETM_MODE_DATA_TRACE_VAL);
196 	config->cfg &= ~(BIT(16) | BIT(17));
197 
198 	/* Disable all events tracing */
199 	config->eventctrl0 = 0x0;
200 	config->eventctrl1 = 0x0;
201 
202 	/* Disable timestamp event */
203 	config->ts_ctrl = 0x0;
204 
205 	/* Disable stalling */
206 	config->stall_ctrl = 0x0;
207 
	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (!drvdata->syncpr)
		config->syncfreq = 0x8;
211 
212 	/*
213 	 * Enable ViewInst to trace everything with start-stop logic in
214 	 * started state. ARM recommends start-stop logic is set before
215 	 * each trace run.
216 	 */
217 	config->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp > 0) {
219 		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
220 		/* SSSTATUS, bit[9] */
221 		config->vinst_ctrl |= BIT(9);
222 	}
223 
224 	/* No address range filtering for ViewInst */
225 	config->viiectlr = 0x0;
226 
227 	/* No start-stop filtering for ViewInst */
228 	config->vissctlr = 0x0;
229 
230 	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
232 		config->seq_ctrl[i] = 0x0;
233 	config->seq_rst = 0x0;
234 	config->seq_state = 0x0;
235 
236 	/* Disable external input events */
237 	config->ext_inp = 0x0;
238 
239 	config->cntr_idx = 0x0;
240 	for (i = 0; i < drvdata->nr_cntr; i++) {
241 		config->cntrldvr[i] = 0x0;
242 		config->cntr_ctrl[i] = 0x0;
243 		config->cntr_val[i] = 0x0;
244 	}
245 
246 	config->res_idx = 0x0;
247 	for (i = 0; i < drvdata->nr_resource; i++)
248 		config->res_ctrl[i] = 0x0;
249 
250 	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
251 		config->ss_ctrl[i] = 0x0;
252 		config->ss_pe_cmp[i] = 0x0;
253 	}
254 
255 	config->addr_idx = 0x0;
256 	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
257 		config->addr_val[i] = 0x0;
258 		config->addr_acc[i] = 0x0;
259 		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
260 	}
261 
262 	config->ctxid_idx = 0x0;
263 	for (i = 0; i < drvdata->numcidc; i++) {
264 		config->ctxid_pid[i] = 0x0;
265 		config->ctxid_vpid[i] = 0x0;
266 	}
267 
268 	config->ctxid_mask0 = 0x0;
269 	config->ctxid_mask1 = 0x0;
270 
271 	config->vmid_idx = 0x0;
272 	for (i = 0; i < drvdata->numvmidc; i++)
273 		config->vmid_val[i] = 0x0;
274 	config->vmid_mask0 = 0x0;
275 	config->vmid_mask1 = 0x0;
276 
277 	drvdata->trcid = drvdata->cpu + 1;
278 
279 	spin_unlock(&drvdata->spinlock);
280 
281 	return size;
282 }
283 static DEVICE_ATTR_WO(reset);
284 
285 static ssize_t mode_show(struct device *dev,
286 			 struct device_attribute *attr,
287 			 char *buf)
288 {
289 	unsigned long val;
290 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
291 	struct etmv4_config *config = &drvdata->config;
292 
293 	val = config->mode;
294 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
295 }
296 
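/*
 * mode_store - set the trace mode
 *
 * The value is masked with ETMv4_MODE_ALL and then translated into the
 * config, event control, stall control and ViewInst control settings,
 * honouring only the features advertised by the trace unit (branch
 * broadcast, cycle counting, context ID/VMID tracing, conditional and Q
 * element tracing, return stack, etc.).
 */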
297 static ssize_t mode_store(struct device *dev,
298 			  struct device_attribute *attr,
299 			  const char *buf, size_t size)
300 {
301 	unsigned long val, mode;
302 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
303 	struct etmv4_config *config = &drvdata->config;
304 
305 	if (kstrtoul(buf, 16, &val))
306 		return -EINVAL;
307 
308 	spin_lock(&drvdata->spinlock);
309 	config->mode = val & ETMv4_MODE_ALL;
310 
311 	if (config->mode & ETM_MODE_EXCLUDE)
312 		etm4_set_mode_exclude(drvdata, true);
313 	else
314 		etm4_set_mode_exclude(drvdata, false);
315 
	if (drvdata->instrp0) {
317 		/* start by clearing instruction P0 field */
318 		config->cfg  &= ~(BIT(1) | BIT(2));
319 		if (config->mode & ETM_MODE_LOAD)
320 			/* 0b01 Trace load instructions as P0 instructions */
321 			config->cfg  |= BIT(1);
322 		if (config->mode & ETM_MODE_STORE)
323 			/* 0b10 Trace store instructions as P0 instructions */
324 			config->cfg  |= BIT(2);
325 		if (config->mode & ETM_MODE_LOAD_STORE)
326 			/*
327 			 * 0b11 Trace load and store instructions
328 			 * as P0 instructions
329 			 */
330 			config->cfg  |= BIT(1) | BIT(2);
331 	}
332 
333 	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && drvdata->trcbb)
335 		config->cfg |= BIT(3);
336 	else
337 		config->cfg &= ~BIT(3);
338 
339 	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) && drvdata->trccci)
342 		config->cfg |= BIT(4);
343 	else
344 		config->cfg &= ~BIT(4);
345 
346 	/* bit[6], Context ID tracing bit */
347 	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
348 		config->cfg |= BIT(6);
349 	else
350 		config->cfg &= ~BIT(6);
351 
352 	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
353 		config->cfg |= BIT(7);
354 	else
355 		config->cfg &= ~BIT(7);
356 
357 	/* bits[10:8], Conditional instruction tracing bit */
358 	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond) {
360 		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
361 		config->cfg |= mode << 8;
362 	}
363 
364 	/* bit[11], Global timestamp tracing bit */
365 	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
366 		config->cfg |= BIT(11);
367 	else
368 		config->cfg &= ~BIT(11);
369 
370 	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) && drvdata->retstack)
373 		config->cfg |= BIT(12);
374 	else
375 		config->cfg &= ~BIT(12);
376 
377 	/* bits[14:13], Q element enable field */
378 	mode = ETM_MODE_QELEM(config->mode);
379 	/* start by clearing QE bits */
380 	config->cfg &= ~(BIT(13) | BIT(14));
381 	/* if supported, Q elements with instruction counts are enabled */
382 	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
383 		config->cfg |= BIT(13);
384 	/*
385 	 * if supported, Q elements with and without instruction
386 	 * counts are enabled
387 	 */
388 	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
389 		config->cfg |= BIT(14);
390 
391 	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) && drvdata->atbtrig)
394 		config->eventctrl1 |= BIT(11);
395 	else
396 		config->eventctrl1 &= ~BIT(11);
397 
398 	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) && drvdata->lpoverride)
401 		config->eventctrl1 |= BIT(12);
402 	else
403 		config->eventctrl1 &= ~BIT(12);
404 
405 	/* bit[8], Instruction stall bit */
406 	if (config->mode & ETM_MODE_ISTALL_EN)
407 		config->stall_ctrl |= BIT(8);
408 	else
409 		config->stall_ctrl &= ~BIT(8);
410 
411 	/* bit[10], Prioritize instruction trace bit */
412 	if (config->mode & ETM_MODE_INSTPRIO)
413 		config->stall_ctrl |= BIT(10);
414 	else
415 		config->stall_ctrl &= ~BIT(10);
416 
417 	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) && drvdata->nooverflow)
420 		config->stall_ctrl |= BIT(13);
421 	else
422 		config->stall_ctrl &= ~BIT(13);
423 
424 	/* bit[9] Start/stop logic control bit */
425 	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
426 		config->vinst_ctrl |= BIT(9);
427 	else
428 		config->vinst_ctrl &= ~BIT(9);
429 
430 	/* bit[10], Whether a trace unit must trace a Reset exception */
431 	if (config->mode & ETM_MODE_TRACE_RESET)
432 		config->vinst_ctrl |= BIT(10);
433 	else
434 		config->vinst_ctrl &= ~BIT(10);
435 
436 	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) && drvdata->trc_error)
439 		config->vinst_ctrl |= BIT(11);
440 	else
441 		config->vinst_ctrl &= ~BIT(11);
442 
443 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
444 		etm4_config_trace_mode(config);
445 
446 	spin_unlock(&drvdata->spinlock);
447 
448 	return size;
449 }
450 static DEVICE_ATTR_RW(mode);
451 
452 static ssize_t pe_show(struct device *dev,
453 		       struct device_attribute *attr,
454 		       char *buf)
455 {
456 	unsigned long val;
457 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
458 	struct etmv4_config *config = &drvdata->config;
459 
460 	val = config->pe_sel;
461 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
462 }
463 
464 static ssize_t pe_store(struct device *dev,
465 			struct device_attribute *attr,
466 			const char *buf, size_t size)
467 {
468 	unsigned long val;
469 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
470 	struct etmv4_config *config = &drvdata->config;
471 
472 	if (kstrtoul(buf, 16, &val))
473 		return -EINVAL;
474 
475 	spin_lock(&drvdata->spinlock);
476 	if (val > drvdata->nr_pe) {
477 		spin_unlock(&drvdata->spinlock);
478 		return -EINVAL;
479 	}
480 
481 	config->pe_sel = val;
482 	spin_unlock(&drvdata->spinlock);
483 	return size;
484 }
485 static DEVICE_ATTR_RW(pe);
486 
487 static ssize_t event_show(struct device *dev,
488 			  struct device_attribute *attr,
489 			  char *buf)
490 {
491 	unsigned long val;
492 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
493 	struct etmv4_config *config = &drvdata->config;
494 
495 	val = config->eventctrl0;
496 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
497 }
498 
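/*
 * event_store - program the event control 0 selectors
 *
 * Each implemented event selector occupies one byte of the value;
 * drvdata->nr_event holds the index of the highest implemented selector
 * (0x0-0x3), so the value written is truncated to the implemented bytes.
 */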
499 static ssize_t event_store(struct device *dev,
500 			   struct device_attribute *attr,
501 			   const char *buf, size_t size)
502 {
503 	unsigned long val;
504 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
505 	struct etmv4_config *config = &drvdata->config;
506 
507 	if (kstrtoul(buf, 16, &val))
508 		return -EINVAL;
509 
510 	spin_lock(&drvdata->spinlock);
511 	switch (drvdata->nr_event) {
512 	case 0x0:
513 		/* EVENT0, bits[7:0] */
514 		config->eventctrl0 = val & 0xFF;
515 		break;
516 	case 0x1:
517 		 /* EVENT1, bits[15:8] */
518 		config->eventctrl0 = val & 0xFFFF;
519 		break;
520 	case 0x2:
521 		/* EVENT2, bits[23:16] */
522 		config->eventctrl0 = val & 0xFFFFFF;
523 		break;
524 	case 0x3:
525 		/* EVENT3, bits[31:24] */
526 		config->eventctrl0 = val;
527 		break;
528 	default:
529 		break;
530 	}
531 	spin_unlock(&drvdata->spinlock);
532 	return size;
533 }
534 static DEVICE_ATTR_RW(event);
535 
536 static ssize_t event_instren_show(struct device *dev,
537 				  struct device_attribute *attr,
538 				  char *buf)
539 {
540 	unsigned long val;
541 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
542 	struct etmv4_config *config = &drvdata->config;
543 
544 	val = BMVAL(config->eventctrl1, 0, 3);
545 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
546 }
547 
548 static ssize_t event_instren_store(struct device *dev,
549 				   struct device_attribute *attr,
550 				   const char *buf, size_t size)
551 {
552 	unsigned long val;
553 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
554 	struct etmv4_config *config = &drvdata->config;
555 
556 	if (kstrtoul(buf, 16, &val))
557 		return -EINVAL;
558 
559 	spin_lock(&drvdata->spinlock);
560 	/* start by clearing all instruction event enable bits */
561 	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
562 	switch (drvdata->nr_event) {
563 	case 0x0:
564 		/* generate Event element for event 1 */
565 		config->eventctrl1 |= val & BIT(1);
566 		break;
567 	case 0x1:
568 		/* generate Event element for event 1 and 2 */
569 		config->eventctrl1 |= val & (BIT(0) | BIT(1));
570 		break;
571 	case 0x2:
572 		/* generate Event element for event 1, 2 and 3 */
573 		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
574 		break;
575 	case 0x3:
576 		/* generate Event element for all 4 events */
577 		config->eventctrl1 |= val & 0xF;
578 		break;
579 	default:
580 		break;
581 	}
582 	spin_unlock(&drvdata->spinlock);
583 	return size;
584 }
585 static DEVICE_ATTR_RW(event_instren);
586 
587 static ssize_t event_ts_show(struct device *dev,
588 			     struct device_attribute *attr,
589 			     char *buf)
590 {
591 	unsigned long val;
592 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
593 	struct etmv4_config *config = &drvdata->config;
594 
595 	val = config->ts_ctrl;
596 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
597 }
598 
599 static ssize_t event_ts_store(struct device *dev,
600 			      struct device_attribute *attr,
601 			      const char *buf, size_t size)
602 {
603 	unsigned long val;
604 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
605 	struct etmv4_config *config = &drvdata->config;
606 
607 	if (kstrtoul(buf, 16, &val))
608 		return -EINVAL;
609 	if (!drvdata->ts_size)
610 		return -EINVAL;
611 
612 	config->ts_ctrl = val & ETMv4_EVENT_MASK;
613 	return size;
614 }
615 static DEVICE_ATTR_RW(event_ts);
616 
617 static ssize_t syncfreq_show(struct device *dev,
618 			     struct device_attribute *attr,
619 			     char *buf)
620 {
621 	unsigned long val;
622 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
623 	struct etmv4_config *config = &drvdata->config;
624 
625 	val = config->syncfreq;
626 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
627 }
628 
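/*
 * syncfreq_store - set the trace synchronization period
 *
 * The value written is the PERIOD field, i.e. a synchronization request is
 * made roughly every 2^val bytes of trace (the reset default of 0x8 gives
 * 256 bytes).  Rejected when the period is fixed in hardware (syncpr).
 */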
629 static ssize_t syncfreq_store(struct device *dev,
630 			      struct device_attribute *attr,
631 			      const char *buf, size_t size)
632 {
633 	unsigned long val;
634 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
635 	struct etmv4_config *config = &drvdata->config;
636 
637 	if (kstrtoul(buf, 16, &val))
638 		return -EINVAL;
	if (drvdata->syncpr)
640 		return -EINVAL;
641 
642 	config->syncfreq = val & ETMv4_SYNC_MASK;
643 	return size;
644 }
645 static DEVICE_ATTR_RW(syncfreq);
646 
647 static ssize_t cyc_threshold_show(struct device *dev,
648 				  struct device_attribute *attr,
649 				  char *buf)
650 {
651 	unsigned long val;
652 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
653 	struct etmv4_config *config = &drvdata->config;
654 
655 	val = config->ccctlr;
656 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
657 }
658 
659 static ssize_t cyc_threshold_store(struct device *dev,
660 				   struct device_attribute *attr,
661 				   const char *buf, size_t size)
662 {
663 	unsigned long val;
664 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
665 	struct etmv4_config *config = &drvdata->config;
666 
667 	if (kstrtoul(buf, 16, &val))
668 		return -EINVAL;
669 	if (val < drvdata->ccitmin)
670 		return -EINVAL;
671 
672 	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
673 	return size;
674 }
675 static DEVICE_ATTR_RW(cyc_threshold);
676 
677 static ssize_t bb_ctrl_show(struct device *dev,
678 			    struct device_attribute *attr,
679 			    char *buf)
680 {
681 	unsigned long val;
682 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
683 	struct etmv4_config *config = &drvdata->config;
684 
685 	val = config->bb_ctrl;
686 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
687 }
688 
689 static ssize_t bb_ctrl_store(struct device *dev,
690 			     struct device_attribute *attr,
691 			     const char *buf, size_t size)
692 {
693 	unsigned long val;
694 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
695 	struct etmv4_config *config = &drvdata->config;
696 
697 	if (kstrtoul(buf, 16, &val))
698 		return -EINVAL;
	if (!drvdata->trcbb)
700 		return -EINVAL;
701 	if (!drvdata->nr_addr_cmp)
702 		return -EINVAL;
703 	/*
704 	 * Bit[7:0] selects which address range comparator is used for
705 	 * branch broadcast control.
706 	 */
707 	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
708 		return -EINVAL;
709 
710 	config->bb_ctrl = val;
711 	return size;
712 }
713 static DEVICE_ATTR_RW(bb_ctrl);
714 
715 static ssize_t event_vinst_show(struct device *dev,
716 				struct device_attribute *attr,
717 				char *buf)
718 {
719 	unsigned long val;
720 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
721 	struct etmv4_config *config = &drvdata->config;
722 
723 	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
724 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
725 }
726 
727 static ssize_t event_vinst_store(struct device *dev,
728 				 struct device_attribute *attr,
729 				 const char *buf, size_t size)
730 {
731 	unsigned long val;
732 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
733 	struct etmv4_config *config = &drvdata->config;
734 
735 	if (kstrtoul(buf, 16, &val))
736 		return -EINVAL;
737 
738 	spin_lock(&drvdata->spinlock);
739 	val &= ETMv4_EVENT_MASK;
740 	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
741 	config->vinst_ctrl |= val;
742 	spin_unlock(&drvdata->spinlock);
743 	return size;
744 }
745 static DEVICE_ATTR_RW(event_vinst);
746 
747 static ssize_t s_exlevel_vinst_show(struct device *dev,
748 				    struct device_attribute *attr,
749 				    char *buf)
750 {
751 	unsigned long val;
752 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
753 	struct etmv4_config *config = &drvdata->config;
754 
755 	val = BMVAL(config->vinst_ctrl, 16, 19);
756 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
757 }
758 
759 static ssize_t s_exlevel_vinst_store(struct device *dev,
760 				     struct device_attribute *attr,
761 				     const char *buf, size_t size)
762 {
763 	unsigned long val;
764 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
765 	struct etmv4_config *config = &drvdata->config;
766 
767 	if (kstrtoul(buf, 16, &val))
768 		return -EINVAL;
769 
770 	spin_lock(&drvdata->spinlock);
771 	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
772 	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
773 	/* enable instruction tracing for corresponding exception level */
774 	val &= drvdata->s_ex_level;
775 	config->vinst_ctrl |= (val << 16);
776 	spin_unlock(&drvdata->spinlock);
777 	return size;
778 }
779 static DEVICE_ATTR_RW(s_exlevel_vinst);
780 
781 static ssize_t ns_exlevel_vinst_show(struct device *dev,
782 				     struct device_attribute *attr,
783 				     char *buf)
784 {
785 	unsigned long val;
786 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
787 	struct etmv4_config *config = &drvdata->config;
788 
789 	/* EXLEVEL_NS, bits[23:20] */
790 	val = BMVAL(config->vinst_ctrl, 20, 23);
791 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
792 }
793 
794 static ssize_t ns_exlevel_vinst_store(struct device *dev,
795 				      struct device_attribute *attr,
796 				      const char *buf, size_t size)
797 {
798 	unsigned long val;
799 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
800 	struct etmv4_config *config = &drvdata->config;
801 
802 	if (kstrtoul(buf, 16, &val))
803 		return -EINVAL;
804 
805 	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
807 	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
808 	/* enable instruction tracing for corresponding exception level */
809 	val &= drvdata->ns_ex_level;
810 	config->vinst_ctrl |= (val << 20);
811 	spin_unlock(&drvdata->spinlock);
812 	return size;
813 }
814 static DEVICE_ATTR_RW(ns_exlevel_vinst);
815 
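/*
 * addr_idx selects which of the 2 * nr_addr_cmp single address comparators
 * the other addr_* files below operate on.  Illustrative usage:
 *   echo 0x2 > addr_idx; cat addr_range
 */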
816 static ssize_t addr_idx_show(struct device *dev,
817 			     struct device_attribute *attr,
818 			     char *buf)
819 {
820 	unsigned long val;
821 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
822 	struct etmv4_config *config = &drvdata->config;
823 
824 	val = config->addr_idx;
825 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
826 }
827 
828 static ssize_t addr_idx_store(struct device *dev,
829 			      struct device_attribute *attr,
830 			      const char *buf, size_t size)
831 {
832 	unsigned long val;
833 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
834 	struct etmv4_config *config = &drvdata->config;
835 
836 	if (kstrtoul(buf, 16, &val))
837 		return -EINVAL;
838 	if (val >= drvdata->nr_addr_cmp * 2)
839 		return -EINVAL;
840 
841 	/*
842 	 * Use spinlock to ensure index doesn't change while it gets
843 	 * dereferenced multiple times within a spinlock block elsewhere.
844 	 */
845 	spin_lock(&drvdata->spinlock);
846 	config->addr_idx = val;
847 	spin_unlock(&drvdata->spinlock);
848 	return size;
849 }
850 static DEVICE_ATTR_RW(addr_idx);
851 
852 static ssize_t addr_instdatatype_show(struct device *dev,
853 				      struct device_attribute *attr,
854 				      char *buf)
855 {
856 	ssize_t len;
857 	u8 val, idx;
858 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
859 	struct etmv4_config *config = &drvdata->config;
860 
861 	spin_lock(&drvdata->spinlock);
862 	idx = config->addr_idx;
863 	val = BMVAL(config->addr_acc[idx], 0, 1);
864 	len = scnprintf(buf, PAGE_SIZE, "%s\n",
865 			val == ETM_INSTR_ADDR ? "instr" :
866 			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
867 			(val == ETM_DATA_STORE_ADDR ? "data_store" :
868 			"data_load_store")));
869 	spin_unlock(&drvdata->spinlock);
870 	return len;
871 }
872 
873 static ssize_t addr_instdatatype_store(struct device *dev,
874 				       struct device_attribute *attr,
875 				       const char *buf, size_t size)
876 {
877 	u8 idx;
878 	char str[20] = "";
879 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
880 	struct etmv4_config *config = &drvdata->config;
881 
882 	if (strlen(buf) >= 20)
883 		return -EINVAL;
884 	if (sscanf(buf, "%s", str) != 1)
885 		return -EINVAL;
886 
887 	spin_lock(&drvdata->spinlock);
888 	idx = config->addr_idx;
889 	if (!strcmp(str, "instr"))
890 		/* TYPE, bits[1:0] */
891 		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
892 
893 	spin_unlock(&drvdata->spinlock);
894 	return size;
895 }
896 static DEVICE_ATTR_RW(addr_instdatatype);
897 
898 static ssize_t addr_single_show(struct device *dev,
899 				struct device_attribute *attr,
900 				char *buf)
901 {
902 	u8 idx;
903 	unsigned long val;
904 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
905 	struct etmv4_config *config = &drvdata->config;
906 
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
909 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
910 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
911 		spin_unlock(&drvdata->spinlock);
912 		return -EPERM;
913 	}
914 	val = (unsigned long)config->addr_val[idx];
915 	spin_unlock(&drvdata->spinlock);
916 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
917 }
918 
919 static ssize_t addr_single_store(struct device *dev,
920 				 struct device_attribute *attr,
921 				 const char *buf, size_t size)
922 {
923 	u8 idx;
924 	unsigned long val;
925 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
926 	struct etmv4_config *config = &drvdata->config;
927 
928 	if (kstrtoul(buf, 16, &val))
929 		return -EINVAL;
930 
931 	spin_lock(&drvdata->spinlock);
932 	idx = config->addr_idx;
933 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
934 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
935 		spin_unlock(&drvdata->spinlock);
936 		return -EPERM;
937 	}
938 
939 	config->addr_val[idx] = (u64)val;
940 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
941 	spin_unlock(&drvdata->spinlock);
942 	return size;
943 }
944 static DEVICE_ATTR_RW(addr_single);
945 
946 static ssize_t addr_range_show(struct device *dev,
947 			       struct device_attribute *attr,
948 			       char *buf)
949 {
950 	u8 idx;
951 	unsigned long val1, val2;
952 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
953 	struct etmv4_config *config = &drvdata->config;
954 
955 	spin_lock(&drvdata->spinlock);
956 	idx = config->addr_idx;
957 	if (idx % 2 != 0) {
958 		spin_unlock(&drvdata->spinlock);
959 		return -EPERM;
960 	}
961 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
962 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
963 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
964 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
965 		spin_unlock(&drvdata->spinlock);
966 		return -EPERM;
967 	}
968 
969 	val1 = (unsigned long)config->addr_val[idx];
970 	val2 = (unsigned long)config->addr_val[idx + 1];
971 	spin_unlock(&drvdata->spinlock);
972 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
973 }
974 
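/*
 * addr_range_store - program the selected comparator pair as an address range
 *
 * Expects two hexadecimal addresses, lower one first, for example
 * (illustrative values):
 *   echo 0x8000 0x9000 > addr_range
 * addr_idx must point at the even (lower) comparator of a pair that is
 * either unused or already configured as a range.
 */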
975 static ssize_t addr_range_store(struct device *dev,
976 				struct device_attribute *attr,
977 				const char *buf, size_t size)
978 {
979 	u8 idx;
980 	unsigned long val1, val2;
981 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
982 	struct etmv4_config *config = &drvdata->config;
983 
984 	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
985 		return -EINVAL;
986 	/* lower address comparator cannot have a higher address value */
987 	if (val1 > val2)
988 		return -EINVAL;
989 
990 	spin_lock(&drvdata->spinlock);
991 	idx = config->addr_idx;
992 	if (idx % 2 != 0) {
993 		spin_unlock(&drvdata->spinlock);
994 		return -EPERM;
995 	}
996 
997 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
998 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
999 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1000 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1001 		spin_unlock(&drvdata->spinlock);
1002 		return -EPERM;
1003 	}
1004 
1005 	config->addr_val[idx] = (u64)val1;
1006 	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1007 	config->addr_val[idx + 1] = (u64)val2;
1008 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1009 	/*
1010 	 * Program include or exclude control bits for vinst or vdata
1011 	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1012 	 */
1013 	if (config->mode & ETM_MODE_EXCLUDE)
1014 		etm4_set_mode_exclude(drvdata, true);
1015 	else
1016 		etm4_set_mode_exclude(drvdata, false);
1017 
1018 	spin_unlock(&drvdata->spinlock);
1019 	return size;
1020 }
1021 static DEVICE_ATTR_RW(addr_range);
1022 
1023 static ssize_t addr_start_show(struct device *dev,
1024 			       struct device_attribute *attr,
1025 			       char *buf)
1026 {
1027 	u8 idx;
1028 	unsigned long val;
1029 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1030 	struct etmv4_config *config = &drvdata->config;
1031 
1032 	spin_lock(&drvdata->spinlock);
1033 	idx = config->addr_idx;
1034 
1035 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1036 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1037 		spin_unlock(&drvdata->spinlock);
1038 		return -EPERM;
1039 	}
1040 
1041 	val = (unsigned long)config->addr_val[idx];
1042 	spin_unlock(&drvdata->spinlock);
1043 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1044 }
1045 
1046 static ssize_t addr_start_store(struct device *dev,
1047 				struct device_attribute *attr,
1048 				const char *buf, size_t size)
1049 {
1050 	u8 idx;
1051 	unsigned long val;
1052 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1053 	struct etmv4_config *config = &drvdata->config;
1054 
1055 	if (kstrtoul(buf, 16, &val))
1056 		return -EINVAL;
1057 
1058 	spin_lock(&drvdata->spinlock);
1059 	idx = config->addr_idx;
1060 	if (!drvdata->nr_addr_cmp) {
1061 		spin_unlock(&drvdata->spinlock);
1062 		return -EINVAL;
1063 	}
1064 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1065 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1066 		spin_unlock(&drvdata->spinlock);
1067 		return -EPERM;
1068 	}
1069 
1070 	config->addr_val[idx] = (u64)val;
1071 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
1072 	config->vissctlr |= BIT(idx);
1073 	/* SSSTATUS, bit[9] - turn on start/stop logic */
1074 	config->vinst_ctrl |= BIT(9);
1075 	spin_unlock(&drvdata->spinlock);
1076 	return size;
1077 }
1078 static DEVICE_ATTR_RW(addr_start);
1079 
1080 static ssize_t addr_stop_show(struct device *dev,
1081 			      struct device_attribute *attr,
1082 			      char *buf)
1083 {
1084 	u8 idx;
1085 	unsigned long val;
1086 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1087 	struct etmv4_config *config = &drvdata->config;
1088 
1089 	spin_lock(&drvdata->spinlock);
1090 	idx = config->addr_idx;
1091 
1092 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1093 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1094 		spin_unlock(&drvdata->spinlock);
1095 		return -EPERM;
1096 	}
1097 
1098 	val = (unsigned long)config->addr_val[idx];
1099 	spin_unlock(&drvdata->spinlock);
1100 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1101 }
1102 
1103 static ssize_t addr_stop_store(struct device *dev,
1104 			       struct device_attribute *attr,
1105 			       const char *buf, size_t size)
1106 {
1107 	u8 idx;
1108 	unsigned long val;
1109 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1110 	struct etmv4_config *config = &drvdata->config;
1111 
1112 	if (kstrtoul(buf, 16, &val))
1113 		return -EINVAL;
1114 
1115 	spin_lock(&drvdata->spinlock);
1116 	idx = config->addr_idx;
1117 	if (!drvdata->nr_addr_cmp) {
1118 		spin_unlock(&drvdata->spinlock);
1119 		return -EINVAL;
1120 	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1123 		spin_unlock(&drvdata->spinlock);
1124 		return -EPERM;
1125 	}
1126 
1127 	config->addr_val[idx] = (u64)val;
1128 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1129 	config->vissctlr |= BIT(idx + 16);
1130 	/* SSSTATUS, bit[9] - turn on start/stop logic */
1131 	config->vinst_ctrl |= BIT(9);
1132 	spin_unlock(&drvdata->spinlock);
1133 	return size;
1134 }
1135 static DEVICE_ATTR_RW(addr_stop);
1136 
1137 static ssize_t addr_ctxtype_show(struct device *dev,
1138 				 struct device_attribute *attr,
1139 				 char *buf)
1140 {
1141 	ssize_t len;
1142 	u8 idx, val;
1143 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1144 	struct etmv4_config *config = &drvdata->config;
1145 
1146 	spin_lock(&drvdata->spinlock);
1147 	idx = config->addr_idx;
1148 	/* CONTEXTTYPE, bits[3:2] */
1149 	val = BMVAL(config->addr_acc[idx], 2, 3);
1150 	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1151 			(val == ETM_CTX_CTXID ? "ctxid" :
1152 			(val == ETM_CTX_VMID ? "vmid" : "all")));
1153 	spin_unlock(&drvdata->spinlock);
1154 	return len;
1155 }
1156 
1157 static ssize_t addr_ctxtype_store(struct device *dev,
1158 				  struct device_attribute *attr,
1159 				  const char *buf, size_t size)
1160 {
1161 	u8 idx;
1162 	char str[10] = "";
1163 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1164 	struct etmv4_config *config = &drvdata->config;
1165 
1166 	if (strlen(buf) >= 10)
1167 		return -EINVAL;
1168 	if (sscanf(buf, "%s", str) != 1)
1169 		return -EINVAL;
1170 
1171 	spin_lock(&drvdata->spinlock);
1172 	idx = config->addr_idx;
1173 	if (!strcmp(str, "none"))
1174 		/* start by clearing context type bits */
1175 		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1176 	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID comparison */
1178 		if (drvdata->numcidc) {
1179 			config->addr_acc[idx] |= BIT(2);
1180 			config->addr_acc[idx] &= ~BIT(3);
1181 		}
1182 	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID comparison */
1184 		if (drvdata->numvmidc) {
1185 			config->addr_acc[idx] &= ~BIT(2);
1186 			config->addr_acc[idx] |= BIT(3);
1187 		}
1188 	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID comparison
		 */
1193 		if (drvdata->numcidc)
1194 			config->addr_acc[idx] |= BIT(2);
1195 		if (drvdata->numvmidc)
1196 			config->addr_acc[idx] |= BIT(3);
1197 	}
1198 	spin_unlock(&drvdata->spinlock);
1199 	return size;
1200 }
1201 static DEVICE_ATTR_RW(addr_ctxtype);
1202 
1203 static ssize_t addr_context_show(struct device *dev,
1204 				 struct device_attribute *attr,
1205 				 char *buf)
1206 {
1207 	u8 idx;
1208 	unsigned long val;
1209 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1210 	struct etmv4_config *config = &drvdata->config;
1211 
1212 	spin_lock(&drvdata->spinlock);
1213 	idx = config->addr_idx;
1214 	/* context ID comparator bits[6:4] */
1215 	val = BMVAL(config->addr_acc[idx], 4, 6);
1216 	spin_unlock(&drvdata->spinlock);
1217 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1218 }
1219 
1220 static ssize_t addr_context_store(struct device *dev,
1221 				  struct device_attribute *attr,
1222 				  const char *buf, size_t size)
1223 {
1224 	u8 idx;
1225 	unsigned long val;
1226 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1227 	struct etmv4_config *config = &drvdata->config;
1228 
1229 	if (kstrtoul(buf, 16, &val))
1230 		return -EINVAL;
1231 	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1232 		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
1235 		return -EINVAL;
1236 
1237 	spin_lock(&drvdata->spinlock);
1238 	idx = config->addr_idx;
1239 	/* clear context ID comparator bits[6:4] */
1240 	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1241 	config->addr_acc[idx] |= (val << 4);
1242 	spin_unlock(&drvdata->spinlock);
1243 	return size;
1244 }
1245 static DEVICE_ATTR_RW(addr_context);
1246 
1247 static ssize_t seq_idx_show(struct device *dev,
1248 			    struct device_attribute *attr,
1249 			    char *buf)
1250 {
1251 	unsigned long val;
1252 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1253 	struct etmv4_config *config = &drvdata->config;
1254 
1255 	val = config->seq_idx;
1256 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1257 }
1258 
1259 static ssize_t seq_idx_store(struct device *dev,
1260 			     struct device_attribute *attr,
1261 			     const char *buf, size_t size)
1262 {
1263 	unsigned long val;
1264 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1265 	struct etmv4_config *config = &drvdata->config;
1266 
1267 	if (kstrtoul(buf, 16, &val))
1268 		return -EINVAL;
1269 	if (val >= drvdata->nrseqstate - 1)
1270 		return -EINVAL;
1271 
1272 	/*
1273 	 * Use spinlock to ensure index doesn't change while it gets
1274 	 * dereferenced multiple times within a spinlock block elsewhere.
1275 	 */
1276 	spin_lock(&drvdata->spinlock);
1277 	config->seq_idx = val;
1278 	spin_unlock(&drvdata->spinlock);
1279 	return size;
1280 }
1281 static DEVICE_ATTR_RW(seq_idx);
1282 
1283 static ssize_t seq_state_show(struct device *dev,
1284 			      struct device_attribute *attr,
1285 			      char *buf)
1286 {
1287 	unsigned long val;
1288 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1289 	struct etmv4_config *config = &drvdata->config;
1290 
1291 	val = config->seq_state;
1292 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1293 }
1294 
1295 static ssize_t seq_state_store(struct device *dev,
1296 			       struct device_attribute *attr,
1297 			       const char *buf, size_t size)
1298 {
1299 	unsigned long val;
1300 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1301 	struct etmv4_config *config = &drvdata->config;
1302 
1303 	if (kstrtoul(buf, 16, &val))
1304 		return -EINVAL;
1305 	if (val >= drvdata->nrseqstate)
1306 		return -EINVAL;
1307 
1308 	config->seq_state = val;
1309 	return size;
1310 }
1311 static DEVICE_ATTR_RW(seq_state);
1312 
1313 static ssize_t seq_event_show(struct device *dev,
1314 			      struct device_attribute *attr,
1315 			      char *buf)
1316 {
1317 	u8 idx;
1318 	unsigned long val;
1319 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1320 	struct etmv4_config *config = &drvdata->config;
1321 
1322 	spin_lock(&drvdata->spinlock);
1323 	idx = config->seq_idx;
1324 	val = config->seq_ctrl[idx];
1325 	spin_unlock(&drvdata->spinlock);
1326 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1327 }
1328 
1329 static ssize_t seq_event_store(struct device *dev,
1330 			       struct device_attribute *attr,
1331 			       const char *buf, size_t size)
1332 {
1333 	u8 idx;
1334 	unsigned long val;
1335 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1336 	struct etmv4_config *config = &drvdata->config;
1337 
1338 	if (kstrtoul(buf, 16, &val))
1339 		return -EINVAL;
1340 
1341 	spin_lock(&drvdata->spinlock);
1342 	idx = config->seq_idx;
1343 	/* RST, bits[7:0] */
1344 	config->seq_ctrl[idx] = val & 0xFF;
1345 	spin_unlock(&drvdata->spinlock);
1346 	return size;
1347 }
1348 static DEVICE_ATTR_RW(seq_event);
1349 
1350 static ssize_t seq_reset_event_show(struct device *dev,
1351 				    struct device_attribute *attr,
1352 				    char *buf)
1353 {
1354 	unsigned long val;
1355 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1356 	struct etmv4_config *config = &drvdata->config;
1357 
1358 	val = config->seq_rst;
1359 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1360 }
1361 
1362 static ssize_t seq_reset_event_store(struct device *dev,
1363 				     struct device_attribute *attr,
1364 				     const char *buf, size_t size)
1365 {
1366 	unsigned long val;
1367 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1368 	struct etmv4_config *config = &drvdata->config;
1369 
1370 	if (kstrtoul(buf, 16, &val))
1371 		return -EINVAL;
	if (!drvdata->nrseqstate)
1373 		return -EINVAL;
1374 
1375 	config->seq_rst = val & ETMv4_EVENT_MASK;
1376 	return size;
1377 }
1378 static DEVICE_ATTR_RW(seq_reset_event);
1379 
1380 static ssize_t cntr_idx_show(struct device *dev,
1381 			     struct device_attribute *attr,
1382 			     char *buf)
1383 {
1384 	unsigned long val;
1385 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1386 	struct etmv4_config *config = &drvdata->config;
1387 
1388 	val = config->cntr_idx;
1389 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1390 }
1391 
1392 static ssize_t cntr_idx_store(struct device *dev,
1393 			      struct device_attribute *attr,
1394 			      const char *buf, size_t size)
1395 {
1396 	unsigned long val;
1397 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1398 	struct etmv4_config *config = &drvdata->config;
1399 
1400 	if (kstrtoul(buf, 16, &val))
1401 		return -EINVAL;
1402 	if (val >= drvdata->nr_cntr)
1403 		return -EINVAL;
1404 
1405 	/*
1406 	 * Use spinlock to ensure index doesn't change while it gets
1407 	 * dereferenced multiple times within a spinlock block elsewhere.
1408 	 */
1409 	spin_lock(&drvdata->spinlock);
1410 	config->cntr_idx = val;
1411 	spin_unlock(&drvdata->spinlock);
1412 	return size;
1413 }
1414 static DEVICE_ATTR_RW(cntr_idx);
1415 
1416 static ssize_t cntrldvr_show(struct device *dev,
1417 			     struct device_attribute *attr,
1418 			     char *buf)
1419 {
1420 	u8 idx;
1421 	unsigned long val;
1422 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423 	struct etmv4_config *config = &drvdata->config;
1424 
1425 	spin_lock(&drvdata->spinlock);
1426 	idx = config->cntr_idx;
1427 	val = config->cntrldvr[idx];
1428 	spin_unlock(&drvdata->spinlock);
1429 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1430 }
1431 
1432 static ssize_t cntrldvr_store(struct device *dev,
1433 			      struct device_attribute *attr,
1434 			      const char *buf, size_t size)
1435 {
1436 	u8 idx;
1437 	unsigned long val;
1438 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1439 	struct etmv4_config *config = &drvdata->config;
1440 
1441 	if (kstrtoul(buf, 16, &val))
1442 		return -EINVAL;
1443 	if (val > ETM_CNTR_MAX_VAL)
1444 		return -EINVAL;
1445 
1446 	spin_lock(&drvdata->spinlock);
1447 	idx = config->cntr_idx;
1448 	config->cntrldvr[idx] = val;
1449 	spin_unlock(&drvdata->spinlock);
1450 	return size;
1451 }
1452 static DEVICE_ATTR_RW(cntrldvr);
1453 
1454 static ssize_t cntr_val_show(struct device *dev,
1455 			     struct device_attribute *attr,
1456 			     char *buf)
1457 {
1458 	u8 idx;
1459 	unsigned long val;
1460 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461 	struct etmv4_config *config = &drvdata->config;
1462 
1463 	spin_lock(&drvdata->spinlock);
1464 	idx = config->cntr_idx;
1465 	val = config->cntr_val[idx];
1466 	spin_unlock(&drvdata->spinlock);
1467 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1468 }
1469 
1470 static ssize_t cntr_val_store(struct device *dev,
1471 			      struct device_attribute *attr,
1472 			      const char *buf, size_t size)
1473 {
1474 	u8 idx;
1475 	unsigned long val;
1476 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1477 	struct etmv4_config *config = &drvdata->config;
1478 
1479 	if (kstrtoul(buf, 16, &val))
1480 		return -EINVAL;
1481 	if (val > ETM_CNTR_MAX_VAL)
1482 		return -EINVAL;
1483 
1484 	spin_lock(&drvdata->spinlock);
1485 	idx = config->cntr_idx;
1486 	config->cntr_val[idx] = val;
1487 	spin_unlock(&drvdata->spinlock);
1488 	return size;
1489 }
1490 static DEVICE_ATTR_RW(cntr_val);
1491 
1492 static ssize_t cntr_ctrl_show(struct device *dev,
1493 			      struct device_attribute *attr,
1494 			      char *buf)
1495 {
1496 	u8 idx;
1497 	unsigned long val;
1498 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1499 	struct etmv4_config *config = &drvdata->config;
1500 
1501 	spin_lock(&drvdata->spinlock);
1502 	idx = config->cntr_idx;
1503 	val = config->cntr_ctrl[idx];
1504 	spin_unlock(&drvdata->spinlock);
1505 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1506 }
1507 
1508 static ssize_t cntr_ctrl_store(struct device *dev,
1509 			       struct device_attribute *attr,
1510 			       const char *buf, size_t size)
1511 {
1512 	u8 idx;
1513 	unsigned long val;
1514 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1515 	struct etmv4_config *config = &drvdata->config;
1516 
1517 	if (kstrtoul(buf, 16, &val))
1518 		return -EINVAL;
1519 
1520 	spin_lock(&drvdata->spinlock);
1521 	idx = config->cntr_idx;
1522 	config->cntr_ctrl[idx] = val;
1523 	spin_unlock(&drvdata->spinlock);
1524 	return size;
1525 }
1526 static DEVICE_ATTR_RW(cntr_ctrl);
1527 
1528 static ssize_t res_idx_show(struct device *dev,
1529 			    struct device_attribute *attr,
1530 			    char *buf)
1531 {
1532 	unsigned long val;
1533 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1534 	struct etmv4_config *config = &drvdata->config;
1535 
1536 	val = config->res_idx;
1537 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1538 }
1539 
1540 static ssize_t res_idx_store(struct device *dev,
1541 			     struct device_attribute *attr,
1542 			     const char *buf, size_t size)
1543 {
1544 	unsigned long val;
1545 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1546 	struct etmv4_config *config = &drvdata->config;
1547 
1548 	if (kstrtoul(buf, 16, &val))
1549 		return -EINVAL;
1550 	/* Resource selector pair 0 is always implemented and reserved */
1551 	if ((val == 0) || (val >= drvdata->nr_resource))
1552 		return -EINVAL;
1553 
1554 	/*
1555 	 * Use spinlock to ensure index doesn't change while it gets
1556 	 * dereferenced multiple times within a spinlock block elsewhere.
1557 	 */
1558 	spin_lock(&drvdata->spinlock);
1559 	config->res_idx = val;
1560 	spin_unlock(&drvdata->spinlock);
1561 	return size;
1562 }
1563 static DEVICE_ATTR_RW(res_idx);
1564 
1565 static ssize_t res_ctrl_show(struct device *dev,
1566 			     struct device_attribute *attr,
1567 			     char *buf)
1568 {
1569 	u8 idx;
1570 	unsigned long val;
1571 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1572 	struct etmv4_config *config = &drvdata->config;
1573 
1574 	spin_lock(&drvdata->spinlock);
1575 	idx = config->res_idx;
1576 	val = config->res_ctrl[idx];
1577 	spin_unlock(&drvdata->spinlock);
1578 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1579 }
1580 
1581 static ssize_t res_ctrl_store(struct device *dev,
1582 			      struct device_attribute *attr,
1583 			      const char *buf, size_t size)
1584 {
1585 	u8 idx;
1586 	unsigned long val;
1587 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1588 	struct etmv4_config *config = &drvdata->config;
1589 
1590 	if (kstrtoul(buf, 16, &val))
1591 		return -EINVAL;
1592 
1593 	spin_lock(&drvdata->spinlock);
1594 	idx = config->res_idx;
	/* For an odd idx the pair inversion bit is RES0 */
1596 	if (idx % 2 != 0)
1597 		/* PAIRINV, bit[21] */
1598 		val &= ~BIT(21);
1599 	config->res_ctrl[idx] = val;
1600 	spin_unlock(&drvdata->spinlock);
1601 	return size;
1602 }
1603 static DEVICE_ATTR_RW(res_ctrl);
1604 
1605 static ssize_t ctxid_idx_show(struct device *dev,
1606 			      struct device_attribute *attr,
1607 			      char *buf)
1608 {
1609 	unsigned long val;
1610 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1611 	struct etmv4_config *config = &drvdata->config;
1612 
1613 	val = config->ctxid_idx;
1614 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1615 }
1616 
1617 static ssize_t ctxid_idx_store(struct device *dev,
1618 			       struct device_attribute *attr,
1619 			       const char *buf, size_t size)
1620 {
1621 	unsigned long val;
1622 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1623 	struct etmv4_config *config = &drvdata->config;
1624 
1625 	if (kstrtoul(buf, 16, &val))
1626 		return -EINVAL;
1627 	if (val >= drvdata->numcidc)
1628 		return -EINVAL;
1629 
1630 	/*
1631 	 * Use spinlock to ensure index doesn't change while it gets
1632 	 * dereferenced multiple times within a spinlock block elsewhere.
1633 	 */
1634 	spin_lock(&drvdata->spinlock);
1635 	config->ctxid_idx = val;
1636 	spin_unlock(&drvdata->spinlock);
1637 	return size;
1638 }
1639 static DEVICE_ATTR_RW(ctxid_idx);
1640 
1641 static ssize_t ctxid_pid_show(struct device *dev,
1642 			      struct device_attribute *attr,
1643 			      char *buf)
1644 {
1645 	u8 idx;
1646 	unsigned long val;
1647 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1648 	struct etmv4_config *config = &drvdata->config;
1649 
1650 	spin_lock(&drvdata->spinlock);
1651 	idx = config->ctxid_idx;
1652 	val = (unsigned long)config->ctxid_vpid[idx];
1653 	spin_unlock(&drvdata->spinlock);
1654 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1655 }
1656 
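/*
 * ctxid_pid_store - set the selected context ID comparator value
 *
 * The value written is a PID as seen from the current PID namespace; it is
 * translated with coresight_vpid_to_pid() and both the kernel and the
 * namespace-local values are kept so that reads report the latter.
 */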
1657 static ssize_t ctxid_pid_store(struct device *dev,
1658 			       struct device_attribute *attr,
1659 			       const char *buf, size_t size)
1660 {
1661 	u8 idx;
1662 	unsigned long vpid, pid;
1663 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1664 	struct etmv4_config *config = &drvdata->config;
1665 
	/*
	 * Only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and the ctxid size is greater
	 * than 0 bits.
	 */
1671 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1672 		return -EINVAL;
1673 	if (kstrtoul(buf, 16, &vpid))
1674 		return -EINVAL;
1675 
1676 	pid = coresight_vpid_to_pid(vpid);
1677 
1678 	spin_lock(&drvdata->spinlock);
1679 	idx = config->ctxid_idx;
1680 	config->ctxid_pid[idx] = (u64)pid;
1681 	config->ctxid_vpid[idx] = (u64)vpid;
1682 	spin_unlock(&drvdata->spinlock);
1683 	return size;
1684 }
1685 static DEVICE_ATTR_RW(ctxid_pid);
1686 
1687 static ssize_t ctxid_masks_show(struct device *dev,
1688 				struct device_attribute *attr,
1689 				char *buf)
1690 {
1691 	unsigned long val1, val2;
1692 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693 	struct etmv4_config *config = &drvdata->config;
1694 
1695 	spin_lock(&drvdata->spinlock);
1696 	val1 = config->ctxid_mask0;
1697 	val2 = config->ctxid_mask1;
1698 	spin_unlock(&drvdata->spinlock);
1699 	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1700 }
1701 
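/*
 * ctxid_masks_store - set the byte masks for the context ID comparators
 *
 * Takes two hexadecimal values: the first covers comparators 0-3, the second
 * comparators 4-7, one mask byte per comparator.  Comparator value bytes
 * that are masked out are cleared here as well, as the architecture requires
 * their value to be programmed to 0x0.
 */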
1702 static ssize_t ctxid_masks_store(struct device *dev,
1703 				struct device_attribute *attr,
1704 				const char *buf, size_t size)
1705 {
1706 	u8 i, j, maskbyte;
1707 	unsigned long val1, val2, mask;
1708 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1709 	struct etmv4_config *config = &drvdata->config;
1710 
	/*
	 * Only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and the ctxid size is greater
	 * than 0 bits.
	 */
1716 	if (!drvdata->ctxid_size || !drvdata->numcidc)
1717 		return -EINVAL;
1718 	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1719 		return -EINVAL;
1720 
1721 	spin_lock(&drvdata->spinlock);
1722 	/*
1723 	 * each byte[0..3] controls mask value applied to ctxid
1724 	 * comparator[0..3]
1725 	 */
1726 	switch (drvdata->numcidc) {
1727 	case 0x1:
1728 		/* COMP0, bits[7:0] */
1729 		config->ctxid_mask0 = val1 & 0xFF;
1730 		break;
1731 	case 0x2:
1732 		/* COMP1, bits[15:8] */
1733 		config->ctxid_mask0 = val1 & 0xFFFF;
1734 		break;
1735 	case 0x3:
1736 		/* COMP2, bits[23:16] */
1737 		config->ctxid_mask0 = val1 & 0xFFFFFF;
1738 		break;
1739 	case 0x4:
1740 		 /* COMP3, bits[31:24] */
1741 		config->ctxid_mask0 = val1;
1742 		break;
1743 	case 0x5:
1744 		/* COMP4, bits[7:0] */
1745 		config->ctxid_mask0 = val1;
1746 		config->ctxid_mask1 = val2 & 0xFF;
1747 		break;
1748 	case 0x6:
1749 		/* COMP5, bits[15:8] */
1750 		config->ctxid_mask0 = val1;
1751 		config->ctxid_mask1 = val2 & 0xFFFF;
1752 		break;
1753 	case 0x7:
1754 		/* COMP6, bits[23:16] */
1755 		config->ctxid_mask0 = val1;
1756 		config->ctxid_mask1 = val2 & 0xFFFFFF;
1757 		break;
1758 	case 0x8:
1759 		/* COMP7, bits[31:24] */
1760 		config->ctxid_mask0 = val1;
1761 		config->ctxid_mask1 = val2;
1762 		break;
1763 	default:
1764 		break;
1765 	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the ctxid comparator value to 0x0, otherwise the behavior is
	 * unpredictable.  For example, bit[3] of ctxid_mask0 sits in the mask
	 * byte for comparator 0, so when it is 1 we must clear bits[31:24]
	 * (byte 3) of the ctxid comparator 0 value register.
	 */
1772 	mask = config->ctxid_mask0;
1773 	for (i = 0; i < drvdata->numcidc; i++) {
1774 		/* mask value of corresponding ctxid comparator */
1775 		maskbyte = mask & ETMv4_EVENT_MASK;
1776 		/*
1777 		 * each bit corresponds to a byte of respective ctxid comparator
1778 		 * value register
1779 		 */
1780 		for (j = 0; j < 8; j++) {
1781 			if (maskbyte & 1)
1782 				config->ctxid_pid[i] &= ~(0xFF << (j * 8));
1783 			maskbyte >>= 1;
1784 		}
1785 		/* Select the next ctxid comparator mask value */
1786 		if (i == 3)
1787 			/* ctxid comparators[4-7] */
1788 			mask = config->ctxid_mask1;
1789 		else
1790 			mask >>= 0x8;
1791 	}
1792 
1793 	spin_unlock(&drvdata->spinlock);
1794 	return size;
1795 }
1796 static DEVICE_ATTR_RW(ctxid_masks);
1797 
static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

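/*
 * vmid_val reads or writes the value of the VMID comparator selected by
 * vmid_idx; the configured value is programmed into the corresponding
 * TRCVMIDCVRn register when the trace unit is enabled.
 */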
static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Take the spinlock so vmid_idx and the comparator value are read
	 * consistently with respect to concurrent updates.
	 */
	spin_lock(&drvdata->spinlock);
	val = (unsigned long)config->vmid_val[config->vmid_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least
	 * 8 bits
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);

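/*
 * vmid_masks takes and returns two hex words: the bytes of the first word
 * hold the byte-masks for VMID comparators 0-3, the bytes of the second
 * word those for comparators 4-7.
 */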
static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the vmid size is at least
	 * 8 bits
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * Each byte[0..3] of vmid_mask0 controls the mask applied to vmid
	 * comparator[0..3]; the bytes of vmid_mask1 cover comparators[4..7].
	 */
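	/*
	 * For example, with numvmidc == 2, writing "0xff00 0x0" sets a full
	 * byte mask for comparator 1, so the loop further below clears every
	 * byte of VMID comparator value 1 while comparator 0 is left fully
	 * compared.
	 */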
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise behavior is
	 * unpredictable.  For example, if bit[3] of vmid_mask0 is set (mask
	 * byte 0, which governs comparator 0), we must clear bits[31:24] of
	 * the vmid comparator0 value register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask byte of the corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * Each bit of the mask byte corresponds to a byte of the
		 * respective vmid comparator value register.  Use a 64-bit
		 * constant so bytes 4-7 of the comparator value are cleared
		 * correctly.
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFULL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);

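/* cpu is read-only and reports the CPU this trace unit is affine to. */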
static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

#define coresight_etm4x_simple_func(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, name, offset)
2044 
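/*
 * Each invocation below generates a read-only dev_attr_<name> whose show
 * handler dumps the raw value of the ETM register at <offset> (see
 * coresight_simple_func() in coresight-priv.h).
 */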
coresight_etm4x_simple_func(trcoslsr, TRCOSLSR);
coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
coresight_etm4x_simple_func(trclsr, TRCLSR);
coresight_etm4x_simple_func(trcconfig, TRCCONFIGR);
coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR);
coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_simple_func(trcdevid, TRCDEVID);
coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

coresight_etm4x_simple_func(trcidr0, TRCIDR0);
coresight_etm4x_simple_func(trcidr1, TRCIDR1);
coresight_etm4x_simple_func(trcidr2, TRCIDR2);
coresight_etm4x_simple_func(trcidr3, TRCIDR3);
coresight_etm4x_simple_func(trcidr4, TRCIDR4);
coresight_etm4x_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_simple_func(trcidr8, TRCIDR8);
coresight_etm4x_simple_func(trcidr9, TRCIDR9);
coresight_etm4x_simple_func(trcidr10, TRCIDR10);
coresight_etm4x_simple_func(trcidr11, TRCIDR11);
coresight_etm4x_simple_func(trcidr12, TRCIDR12);
coresight_etm4x_simple_func(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
2106 
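/*
 * The unnamed group populates the device's sysfs directory directly, while
 * the "mgmt" and "trcidr" groups appear as subdirectories of it.  The
 * NULL-terminated coresight_etmv4_groups list is handed to the coresight
 * core when the device is registered.
 */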
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};