// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"

static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}

static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			  ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	config->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++) {
		config->ctxid_pid[i] = 0x0;
		config->ctxid_vpid[i] = 0x0;
	}

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) && (drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	/* bit[7], Virtual context identifier (VMID) tracing bit */
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
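
/*
 * Illustrative usage from user space (example only; the device name under
 * /sys/bus/coresight/devices/ is platform dependent):
 *
 *   echo 1 > reset       # return the trace configuration to its defaults
 *   echo <hex> > mode    # <hex> is a combination of the ETM_MODE_ and
 *                        # ETMv4_MODE_ flags handled above
 *   cat mode             # read back the resulting mode
 */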

static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		config->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		config->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		config->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		config->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);

static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		config->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);

static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	config->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);

static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	config->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);

static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val < drvdata->ccitmin)
		return -EINVAL;

	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);

static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;
	/*
	 * Bit[7:0] selects which address range comparator is used for
	 * branch broadcast control.
	 */
	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
		return -EINVAL;

	config->bb_ctrl = val;
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);

static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	config->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);

static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->vinst_ctrl, 16, 19);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = BMVAL(config->vinst_ctrl, 20, 23);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);
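
/*
 * Illustrative flow for programming an address range comparator pair from
 * sysfs (example only, the values are hypothetical):
 *
 *   echo 0 > addr_idx                    # select comparator pair 0/1
 *   echo <start> <stop> > addr_range     # program the pair as a range
 *
 * addr_range only accepts an even addr_idx because range matching always
 * uses a comparator pair (idx and idx + 1), as enforced below.
 */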

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID comparison */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID comparison */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID comparison
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);

static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);

static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* RST, bits[7:0] */
	config->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);

static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For an odd idx the pair inversion bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	val = (unsigned long)config->ctxid_vpid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	config->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the ctxid comparator value to 0x0, otherwise behavior is
	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
	 * clear bits[31:24] (byte 3) of the ctxid comparator0 value register.
	 */
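	/*
	 * Worked example (illustrative): with two comparators implemented,
	 * writing "0x3 0x0" yields ctxid_mask0 = 0x3, i.e. bits[1:0] mask
	 * bytes 0 and 1 of comparator 0, so the loop below clears those two
	 * bytes of ctxid_pid[0] and leaves comparator 1 untouched.
	 */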
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);

static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise behavior is
	 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
	 * clear bits[31:24] (byte 3) of the vmid comparator0 value register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

struct etmv4_reg {
	void __iomem *addr;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}

#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};

static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};