1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright(C) 2015 Linaro Limited. All rights reserved. 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 5 */ 6 7 #include <linux/pid_namespace.h> 8 #include <linux/pm_runtime.h> 9 #include <linux/sysfs.h> 10 #include "coresight-etm4x.h" 11 #include "coresight-priv.h" 12 #include "coresight-syscfg.h" 13 14 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) 15 { 16 u8 idx; 17 struct etmv4_config *config = &drvdata->config; 18 19 idx = config->addr_idx; 20 21 /* 22 * TRCACATRn.TYPE bit[1:0]: type of comparison 23 * the trace unit performs 24 */ 25 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) { 26 if (idx % 2 != 0) 27 return -EINVAL; 28 29 /* 30 * We are performing instruction address comparison. Set the 31 * relevant bit of ViewInst Include/Exclude Control register 32 * for corresponding address comparator pair. 33 */ 34 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE || 35 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE) 36 return -EINVAL; 37 38 if (exclude == true) { 39 /* 40 * Set exclude bit and unset the include bit 41 * corresponding to comparator pair 42 */ 43 config->viiectlr |= BIT(idx / 2 + 16); 44 config->viiectlr &= ~BIT(idx / 2); 45 } else { 46 /* 47 * Set include bit and unset exclude bit 48 * corresponding to comparator pair 49 */ 50 config->viiectlr |= BIT(idx / 2); 51 config->viiectlr &= ~BIT(idx / 2 + 16); 52 } 53 } 54 return 0; 55 } 56 57 static ssize_t nr_pe_cmp_show(struct device *dev, 58 struct device_attribute *attr, 59 char *buf) 60 { 61 unsigned long val; 62 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 63 64 val = drvdata->nr_pe_cmp; 65 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 66 } 67 static DEVICE_ATTR_RO(nr_pe_cmp); 68 69 static ssize_t nr_addr_cmp_show(struct device *dev, 70 struct device_attribute *attr, 71 char *buf) 72 { 73 unsigned long val; 74 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 75 76 val 
= drvdata->nr_addr_cmp; 77 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 78 } 79 static DEVICE_ATTR_RO(nr_addr_cmp); 80 81 static ssize_t nr_cntr_show(struct device *dev, 82 struct device_attribute *attr, 83 char *buf) 84 { 85 unsigned long val; 86 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 87 88 val = drvdata->nr_cntr; 89 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 90 } 91 static DEVICE_ATTR_RO(nr_cntr); 92 93 static ssize_t nr_ext_inp_show(struct device *dev, 94 struct device_attribute *attr, 95 char *buf) 96 { 97 unsigned long val; 98 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 99 100 val = drvdata->nr_ext_inp; 101 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 102 } 103 static DEVICE_ATTR_RO(nr_ext_inp); 104 105 static ssize_t numcidc_show(struct device *dev, 106 struct device_attribute *attr, 107 char *buf) 108 { 109 unsigned long val; 110 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 111 112 val = drvdata->numcidc; 113 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 114 } 115 static DEVICE_ATTR_RO(numcidc); 116 117 static ssize_t numvmidc_show(struct device *dev, 118 struct device_attribute *attr, 119 char *buf) 120 { 121 unsigned long val; 122 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 123 124 val = drvdata->numvmidc; 125 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 126 } 127 static DEVICE_ATTR_RO(numvmidc); 128 129 static ssize_t nrseqstate_show(struct device *dev, 130 struct device_attribute *attr, 131 char *buf) 132 { 133 unsigned long val; 134 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 135 136 val = drvdata->nrseqstate; 137 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 138 } 139 static DEVICE_ATTR_RO(nrseqstate); 140 141 static ssize_t nr_resource_show(struct device *dev, 142 struct device_attribute *attr, 143 char *buf) 144 { 145 unsigned long val; 146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 147 148 val = 
drvdata->nr_resource; 149 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 150 } 151 static DEVICE_ATTR_RO(nr_resource); 152 153 static ssize_t nr_ss_cmp_show(struct device *dev, 154 struct device_attribute *attr, 155 char *buf) 156 { 157 unsigned long val; 158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 159 160 val = drvdata->nr_ss_cmp; 161 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 162 } 163 static DEVICE_ATTR_RO(nr_ss_cmp); 164 165 static ssize_t reset_store(struct device *dev, 166 struct device_attribute *attr, 167 const char *buf, size_t size) 168 { 169 int i; 170 unsigned long val; 171 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 172 struct etmv4_config *config = &drvdata->config; 173 174 if (kstrtoul(buf, 16, &val)) 175 return -EINVAL; 176 177 spin_lock(&drvdata->spinlock); 178 if (val) 179 config->mode = 0x0; 180 181 /* Disable data tracing: do not trace load and store data transfers */ 182 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE); 183 config->cfg &= ~(BIT(1) | BIT(2)); 184 185 /* Disable data value and data address tracing */ 186 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR | 187 ETM_MODE_DATA_TRACE_VAL); 188 config->cfg &= ~(BIT(16) | BIT(17)); 189 190 /* Disable all events tracing */ 191 config->eventctrl0 = 0x0; 192 config->eventctrl1 = 0x0; 193 194 /* Disable timestamp event */ 195 config->ts_ctrl = 0x0; 196 197 /* Disable stalling */ 198 config->stall_ctrl = 0x0; 199 200 /* Reset trace synchronization period to 2^8 = 256 bytes*/ 201 if (drvdata->syncpr == false) 202 config->syncfreq = 0x8; 203 204 /* 205 * Enable ViewInst to trace everything with start-stop logic in 206 * started state. ARM recommends start-stop logic is set before 207 * each trace run. 
208 */ 209 config->vinst_ctrl = BIT(0); 210 if (drvdata->nr_addr_cmp > 0) { 211 config->mode |= ETM_MODE_VIEWINST_STARTSTOP; 212 /* SSSTATUS, bit[9] */ 213 config->vinst_ctrl |= BIT(9); 214 } 215 216 /* No address range filtering for ViewInst */ 217 config->viiectlr = 0x0; 218 219 /* No start-stop filtering for ViewInst */ 220 config->vissctlr = 0x0; 221 config->vipcssctlr = 0x0; 222 223 /* Disable seq events */ 224 for (i = 0; i < drvdata->nrseqstate-1; i++) 225 config->seq_ctrl[i] = 0x0; 226 config->seq_rst = 0x0; 227 config->seq_state = 0x0; 228 229 /* Disable external input events */ 230 config->ext_inp = 0x0; 231 232 config->cntr_idx = 0x0; 233 for (i = 0; i < drvdata->nr_cntr; i++) { 234 config->cntrldvr[i] = 0x0; 235 config->cntr_ctrl[i] = 0x0; 236 config->cntr_val[i] = 0x0; 237 } 238 239 config->res_idx = 0x0; 240 for (i = 2; i < 2 * drvdata->nr_resource; i++) 241 config->res_ctrl[i] = 0x0; 242 243 config->ss_idx = 0x0; 244 for (i = 0; i < drvdata->nr_ss_cmp; i++) { 245 config->ss_ctrl[i] = 0x0; 246 config->ss_pe_cmp[i] = 0x0; 247 } 248 249 config->addr_idx = 0x0; 250 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) { 251 config->addr_val[i] = 0x0; 252 config->addr_acc[i] = 0x0; 253 config->addr_type[i] = ETM_ADDR_TYPE_NONE; 254 } 255 256 config->ctxid_idx = 0x0; 257 for (i = 0; i < drvdata->numcidc; i++) 258 config->ctxid_pid[i] = 0x0; 259 260 config->ctxid_mask0 = 0x0; 261 config->ctxid_mask1 = 0x0; 262 263 config->vmid_idx = 0x0; 264 for (i = 0; i < drvdata->numvmidc; i++) 265 config->vmid_val[i] = 0x0; 266 config->vmid_mask0 = 0x0; 267 config->vmid_mask1 = 0x0; 268 269 drvdata->trcid = drvdata->cpu + 1; 270 271 spin_unlock(&drvdata->spinlock); 272 273 cscfg_csdev_reset_feats(to_coresight_device(dev)); 274 275 return size; 276 } 277 static DEVICE_ATTR_WO(reset); 278 279 static ssize_t mode_show(struct device *dev, 280 struct device_attribute *attr, 281 char *buf) 282 { 283 unsigned long val; 284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 
285 struct etmv4_config *config = &drvdata->config; 286 287 val = config->mode; 288 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 289 } 290 291 static ssize_t mode_store(struct device *dev, 292 struct device_attribute *attr, 293 const char *buf, size_t size) 294 { 295 unsigned long val, mode; 296 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 297 struct etmv4_config *config = &drvdata->config; 298 299 if (kstrtoul(buf, 16, &val)) 300 return -EINVAL; 301 302 spin_lock(&drvdata->spinlock); 303 config->mode = val & ETMv4_MODE_ALL; 304 305 if (drvdata->instrp0 == true) { 306 /* start by clearing instruction P0 field */ 307 config->cfg &= ~(BIT(1) | BIT(2)); 308 if (config->mode & ETM_MODE_LOAD) 309 /* 0b01 Trace load instructions as P0 instructions */ 310 config->cfg |= BIT(1); 311 if (config->mode & ETM_MODE_STORE) 312 /* 0b10 Trace store instructions as P0 instructions */ 313 config->cfg |= BIT(2); 314 if (config->mode & ETM_MODE_LOAD_STORE) 315 /* 316 * 0b11 Trace load and store instructions 317 * as P0 instructions 318 */ 319 config->cfg |= BIT(1) | BIT(2); 320 } 321 322 /* bit[3], Branch broadcast mode */ 323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true)) 324 config->cfg |= BIT(3); 325 else 326 config->cfg &= ~BIT(3); 327 328 /* bit[4], Cycle counting instruction trace bit */ 329 if ((config->mode & ETMv4_MODE_CYCACC) && 330 (drvdata->trccci == true)) 331 config->cfg |= BIT(4); 332 else 333 config->cfg &= ~BIT(4); 334 335 /* bit[6], Context ID tracing bit */ 336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size)) 337 config->cfg |= BIT(6); 338 else 339 config->cfg &= ~BIT(6); 340 341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size)) 342 config->cfg |= BIT(7); 343 else 344 config->cfg &= ~BIT(7); 345 346 /* bits[10:8], Conditional instruction tracing bit */ 347 mode = ETM_MODE_COND(config->mode); 348 if (drvdata->trccond == true) { 349 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10)); 350 config->cfg |= mode << 8; 
351 } 352 353 /* bit[11], Global timestamp tracing bit */ 354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size)) 355 config->cfg |= BIT(11); 356 else 357 config->cfg &= ~BIT(11); 358 359 /* bit[12], Return stack enable bit */ 360 if ((config->mode & ETM_MODE_RETURNSTACK) && 361 (drvdata->retstack == true)) 362 config->cfg |= BIT(12); 363 else 364 config->cfg &= ~BIT(12); 365 366 /* bits[14:13], Q element enable field */ 367 mode = ETM_MODE_QELEM(config->mode); 368 /* start by clearing QE bits */ 369 config->cfg &= ~(BIT(13) | BIT(14)); 370 /* 371 * if supported, Q elements with instruction counts are enabled. 372 * Always set the low bit for any requested mode. Valid combos are 373 * 0b00, 0b01 and 0b11. 374 */ 375 if (mode && drvdata->q_support) 376 config->cfg |= BIT(13); 377 /* 378 * if supported, Q elements with and without instruction 379 * counts are enabled 380 */ 381 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) 382 config->cfg |= BIT(14); 383 384 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ 385 if ((config->mode & ETM_MODE_ATB_TRIGGER) && 386 (drvdata->atbtrig == true)) 387 config->eventctrl1 |= BIT(11); 388 else 389 config->eventctrl1 &= ~BIT(11); 390 391 /* bit[12], Low-power state behavior override bit */ 392 if ((config->mode & ETM_MODE_LPOVERRIDE) && 393 (drvdata->lpoverride == true)) 394 config->eventctrl1 |= BIT(12); 395 else 396 config->eventctrl1 &= ~BIT(12); 397 398 /* bit[8], Instruction stall bit */ 399 if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true)) 400 config->stall_ctrl |= BIT(8); 401 else 402 config->stall_ctrl &= ~BIT(8); 403 404 /* bit[10], Prioritize instruction trace bit */ 405 if (config->mode & ETM_MODE_INSTPRIO) 406 config->stall_ctrl |= BIT(10); 407 else 408 config->stall_ctrl &= ~BIT(10); 409 410 /* bit[13], Trace overflow prevention bit */ 411 if ((config->mode & ETM_MODE_NOOVERFLOW) && 412 (drvdata->nooverflow == true)) 413 config->stall_ctrl |= BIT(13); 414 else 415 
config->stall_ctrl &= ~BIT(13); 416 417 /* bit[9] Start/stop logic control bit */ 418 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP) 419 config->vinst_ctrl |= BIT(9); 420 else 421 config->vinst_ctrl &= ~BIT(9); 422 423 /* bit[10], Whether a trace unit must trace a Reset exception */ 424 if (config->mode & ETM_MODE_TRACE_RESET) 425 config->vinst_ctrl |= BIT(10); 426 else 427 config->vinst_ctrl &= ~BIT(10); 428 429 /* bit[11], Whether a trace unit must trace a system error exception */ 430 if ((config->mode & ETM_MODE_TRACE_ERR) && 431 (drvdata->trc_error == true)) 432 config->vinst_ctrl |= BIT(11); 433 else 434 config->vinst_ctrl &= ~BIT(11); 435 436 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) 437 etm4_config_trace_mode(config); 438 439 spin_unlock(&drvdata->spinlock); 440 441 return size; 442 } 443 static DEVICE_ATTR_RW(mode); 444 445 static ssize_t pe_show(struct device *dev, 446 struct device_attribute *attr, 447 char *buf) 448 { 449 unsigned long val; 450 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 451 struct etmv4_config *config = &drvdata->config; 452 453 val = config->pe_sel; 454 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 455 } 456 457 static ssize_t pe_store(struct device *dev, 458 struct device_attribute *attr, 459 const char *buf, size_t size) 460 { 461 unsigned long val; 462 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 463 struct etmv4_config *config = &drvdata->config; 464 465 if (kstrtoul(buf, 16, &val)) 466 return -EINVAL; 467 468 spin_lock(&drvdata->spinlock); 469 if (val > drvdata->nr_pe) { 470 spin_unlock(&drvdata->spinlock); 471 return -EINVAL; 472 } 473 474 config->pe_sel = val; 475 spin_unlock(&drvdata->spinlock); 476 return size; 477 } 478 static DEVICE_ATTR_RW(pe); 479 480 static ssize_t event_show(struct device *dev, 481 struct device_attribute *attr, 482 char *buf) 483 { 484 unsigned long val; 485 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 486 struct etmv4_config 
*config = &drvdata->config; 487 488 val = config->eventctrl0; 489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 490 } 491 492 static ssize_t event_store(struct device *dev, 493 struct device_attribute *attr, 494 const char *buf, size_t size) 495 { 496 unsigned long val; 497 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 498 struct etmv4_config *config = &drvdata->config; 499 500 if (kstrtoul(buf, 16, &val)) 501 return -EINVAL; 502 503 spin_lock(&drvdata->spinlock); 504 switch (drvdata->nr_event) { 505 case 0x0: 506 /* EVENT0, bits[7:0] */ 507 config->eventctrl0 = val & 0xFF; 508 break; 509 case 0x1: 510 /* EVENT1, bits[15:8] */ 511 config->eventctrl0 = val & 0xFFFF; 512 break; 513 case 0x2: 514 /* EVENT2, bits[23:16] */ 515 config->eventctrl0 = val & 0xFFFFFF; 516 break; 517 case 0x3: 518 /* EVENT3, bits[31:24] */ 519 config->eventctrl0 = val; 520 break; 521 default: 522 break; 523 } 524 spin_unlock(&drvdata->spinlock); 525 return size; 526 } 527 static DEVICE_ATTR_RW(event); 528 529 static ssize_t event_instren_show(struct device *dev, 530 struct device_attribute *attr, 531 char *buf) 532 { 533 unsigned long val; 534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 535 struct etmv4_config *config = &drvdata->config; 536 537 val = BMVAL(config->eventctrl1, 0, 3); 538 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 539 } 540 541 static ssize_t event_instren_store(struct device *dev, 542 struct device_attribute *attr, 543 const char *buf, size_t size) 544 { 545 unsigned long val; 546 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 547 struct etmv4_config *config = &drvdata->config; 548 549 if (kstrtoul(buf, 16, &val)) 550 return -EINVAL; 551 552 spin_lock(&drvdata->spinlock); 553 /* start by clearing all instruction event enable bits */ 554 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); 555 switch (drvdata->nr_event) { 556 case 0x0: 557 /* generate Event element for event 1 */ 558 config->eventctrl1 |= val & 
BIT(1); 559 break; 560 case 0x1: 561 /* generate Event element for event 1 and 2 */ 562 config->eventctrl1 |= val & (BIT(0) | BIT(1)); 563 break; 564 case 0x2: 565 /* generate Event element for event 1, 2 and 3 */ 566 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2)); 567 break; 568 case 0x3: 569 /* generate Event element for all 4 events */ 570 config->eventctrl1 |= val & 0xF; 571 break; 572 default: 573 break; 574 } 575 spin_unlock(&drvdata->spinlock); 576 return size; 577 } 578 static DEVICE_ATTR_RW(event_instren); 579 580 static ssize_t event_ts_show(struct device *dev, 581 struct device_attribute *attr, 582 char *buf) 583 { 584 unsigned long val; 585 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 586 struct etmv4_config *config = &drvdata->config; 587 588 val = config->ts_ctrl; 589 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 590 } 591 592 static ssize_t event_ts_store(struct device *dev, 593 struct device_attribute *attr, 594 const char *buf, size_t size) 595 { 596 unsigned long val; 597 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 598 struct etmv4_config *config = &drvdata->config; 599 600 if (kstrtoul(buf, 16, &val)) 601 return -EINVAL; 602 if (!drvdata->ts_size) 603 return -EINVAL; 604 605 config->ts_ctrl = val & ETMv4_EVENT_MASK; 606 return size; 607 } 608 static DEVICE_ATTR_RW(event_ts); 609 610 static ssize_t syncfreq_show(struct device *dev, 611 struct device_attribute *attr, 612 char *buf) 613 { 614 unsigned long val; 615 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 616 struct etmv4_config *config = &drvdata->config; 617 618 val = config->syncfreq; 619 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 620 } 621 622 static ssize_t syncfreq_store(struct device *dev, 623 struct device_attribute *attr, 624 const char *buf, size_t size) 625 { 626 unsigned long val; 627 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 628 struct etmv4_config *config = &drvdata->config; 629 630 if 
(kstrtoul(buf, 16, &val)) 631 return -EINVAL; 632 if (drvdata->syncpr == true) 633 return -EINVAL; 634 635 config->syncfreq = val & ETMv4_SYNC_MASK; 636 return size; 637 } 638 static DEVICE_ATTR_RW(syncfreq); 639 640 static ssize_t cyc_threshold_show(struct device *dev, 641 struct device_attribute *attr, 642 char *buf) 643 { 644 unsigned long val; 645 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 646 struct etmv4_config *config = &drvdata->config; 647 648 val = config->ccctlr; 649 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 650 } 651 652 static ssize_t cyc_threshold_store(struct device *dev, 653 struct device_attribute *attr, 654 const char *buf, size_t size) 655 { 656 unsigned long val; 657 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 658 struct etmv4_config *config = &drvdata->config; 659 660 if (kstrtoul(buf, 16, &val)) 661 return -EINVAL; 662 663 /* mask off max threshold before checking min value */ 664 val &= ETM_CYC_THRESHOLD_MASK; 665 if (val < drvdata->ccitmin) 666 return -EINVAL; 667 668 config->ccctlr = val; 669 return size; 670 } 671 static DEVICE_ATTR_RW(cyc_threshold); 672 673 static ssize_t bb_ctrl_show(struct device *dev, 674 struct device_attribute *attr, 675 char *buf) 676 { 677 unsigned long val; 678 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 679 struct etmv4_config *config = &drvdata->config; 680 681 val = config->bb_ctrl; 682 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 683 } 684 685 static ssize_t bb_ctrl_store(struct device *dev, 686 struct device_attribute *attr, 687 const char *buf, size_t size) 688 { 689 unsigned long val; 690 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 691 struct etmv4_config *config = &drvdata->config; 692 693 if (kstrtoul(buf, 16, &val)) 694 return -EINVAL; 695 if (drvdata->trcbb == false) 696 return -EINVAL; 697 if (!drvdata->nr_addr_cmp) 698 return -EINVAL; 699 700 /* 701 * Bit[8] controls include(1) / exclude(0), bits[0-7] select 702 * 
individual range comparators. If include then at least 1 703 * range must be selected. 704 */ 705 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0)) 706 return -EINVAL; 707 708 config->bb_ctrl = val & GENMASK(8, 0); 709 return size; 710 } 711 static DEVICE_ATTR_RW(bb_ctrl); 712 713 static ssize_t event_vinst_show(struct device *dev, 714 struct device_attribute *attr, 715 char *buf) 716 { 717 unsigned long val; 718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 719 struct etmv4_config *config = &drvdata->config; 720 721 val = config->vinst_ctrl & ETMv4_EVENT_MASK; 722 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 723 } 724 725 static ssize_t event_vinst_store(struct device *dev, 726 struct device_attribute *attr, 727 const char *buf, size_t size) 728 { 729 unsigned long val; 730 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 731 struct etmv4_config *config = &drvdata->config; 732 733 if (kstrtoul(buf, 16, &val)) 734 return -EINVAL; 735 736 spin_lock(&drvdata->spinlock); 737 val &= ETMv4_EVENT_MASK; 738 config->vinst_ctrl &= ~ETMv4_EVENT_MASK; 739 config->vinst_ctrl |= val; 740 spin_unlock(&drvdata->spinlock); 741 return size; 742 } 743 static DEVICE_ATTR_RW(event_vinst); 744 745 static ssize_t s_exlevel_vinst_show(struct device *dev, 746 struct device_attribute *attr, 747 char *buf) 748 { 749 unsigned long val; 750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 751 struct etmv4_config *config = &drvdata->config; 752 753 val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT; 754 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 755 } 756 757 static ssize_t s_exlevel_vinst_store(struct device *dev, 758 struct device_attribute *attr, 759 const char *buf, size_t size) 760 { 761 unsigned long val; 762 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 763 struct etmv4_config *config = &drvdata->config; 764 765 if (kstrtoul(buf, 16, &val)) 766 return -EINVAL; 767 768 
spin_lock(&drvdata->spinlock); 769 /* clear all EXLEVEL_S bits */ 770 config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK); 771 /* enable instruction tracing for corresponding exception level */ 772 val &= drvdata->s_ex_level; 773 config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT); 774 spin_unlock(&drvdata->spinlock); 775 return size; 776 } 777 static DEVICE_ATTR_RW(s_exlevel_vinst); 778 779 static ssize_t ns_exlevel_vinst_show(struct device *dev, 780 struct device_attribute *attr, 781 char *buf) 782 { 783 unsigned long val; 784 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 785 struct etmv4_config *config = &drvdata->config; 786 787 /* EXLEVEL_NS, bits[23:20] */ 788 val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT; 789 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 790 } 791 792 static ssize_t ns_exlevel_vinst_store(struct device *dev, 793 struct device_attribute *attr, 794 const char *buf, size_t size) 795 { 796 unsigned long val; 797 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 798 struct etmv4_config *config = &drvdata->config; 799 800 if (kstrtoul(buf, 16, &val)) 801 return -EINVAL; 802 803 spin_lock(&drvdata->spinlock); 804 /* clear EXLEVEL_NS bits */ 805 config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK); 806 /* enable instruction tracing for corresponding exception level */ 807 val &= drvdata->ns_ex_level; 808 config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT); 809 spin_unlock(&drvdata->spinlock); 810 return size; 811 } 812 static DEVICE_ATTR_RW(ns_exlevel_vinst); 813 814 static ssize_t addr_idx_show(struct device *dev, 815 struct device_attribute *attr, 816 char *buf) 817 { 818 unsigned long val; 819 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 820 struct etmv4_config *config = &drvdata->config; 821 822 val = config->addr_idx; 823 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 824 } 825 826 static ssize_t addr_idx_store(struct device *dev, 827 struct 
device_attribute *attr, 828 const char *buf, size_t size) 829 { 830 unsigned long val; 831 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 832 struct etmv4_config *config = &drvdata->config; 833 834 if (kstrtoul(buf, 16, &val)) 835 return -EINVAL; 836 if (val >= drvdata->nr_addr_cmp * 2) 837 return -EINVAL; 838 839 /* 840 * Use spinlock to ensure index doesn't change while it gets 841 * dereferenced multiple times within a spinlock block elsewhere. 842 */ 843 spin_lock(&drvdata->spinlock); 844 config->addr_idx = val; 845 spin_unlock(&drvdata->spinlock); 846 return size; 847 } 848 static DEVICE_ATTR_RW(addr_idx); 849 850 static ssize_t addr_instdatatype_show(struct device *dev, 851 struct device_attribute *attr, 852 char *buf) 853 { 854 ssize_t len; 855 u8 val, idx; 856 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 857 struct etmv4_config *config = &drvdata->config; 858 859 spin_lock(&drvdata->spinlock); 860 idx = config->addr_idx; 861 val = BMVAL(config->addr_acc[idx], 0, 1); 862 len = scnprintf(buf, PAGE_SIZE, "%s\n", 863 val == ETM_INSTR_ADDR ? "instr" : 864 (val == ETM_DATA_LOAD_ADDR ? "data_load" : 865 (val == ETM_DATA_STORE_ADDR ? 
"data_store" : 866 "data_load_store"))); 867 spin_unlock(&drvdata->spinlock); 868 return len; 869 } 870 871 static ssize_t addr_instdatatype_store(struct device *dev, 872 struct device_attribute *attr, 873 const char *buf, size_t size) 874 { 875 u8 idx; 876 char str[20] = ""; 877 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 878 struct etmv4_config *config = &drvdata->config; 879 880 if (strlen(buf) >= 20) 881 return -EINVAL; 882 if (sscanf(buf, "%s", str) != 1) 883 return -EINVAL; 884 885 spin_lock(&drvdata->spinlock); 886 idx = config->addr_idx; 887 if (!strcmp(str, "instr")) 888 /* TYPE, bits[1:0] */ 889 config->addr_acc[idx] &= ~(BIT(0) | BIT(1)); 890 891 spin_unlock(&drvdata->spinlock); 892 return size; 893 } 894 static DEVICE_ATTR_RW(addr_instdatatype); 895 896 static ssize_t addr_single_show(struct device *dev, 897 struct device_attribute *attr, 898 char *buf) 899 { 900 u8 idx; 901 unsigned long val; 902 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 903 struct etmv4_config *config = &drvdata->config; 904 905 idx = config->addr_idx; 906 spin_lock(&drvdata->spinlock); 907 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || 908 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { 909 spin_unlock(&drvdata->spinlock); 910 return -EPERM; 911 } 912 val = (unsigned long)config->addr_val[idx]; 913 spin_unlock(&drvdata->spinlock); 914 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 915 } 916 917 static ssize_t addr_single_store(struct device *dev, 918 struct device_attribute *attr, 919 const char *buf, size_t size) 920 { 921 u8 idx; 922 unsigned long val; 923 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 924 struct etmv4_config *config = &drvdata->config; 925 926 if (kstrtoul(buf, 16, &val)) 927 return -EINVAL; 928 929 spin_lock(&drvdata->spinlock); 930 idx = config->addr_idx; 931 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || 932 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { 933 
spin_unlock(&drvdata->spinlock); 934 return -EPERM; 935 } 936 937 config->addr_val[idx] = (u64)val; 938 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; 939 spin_unlock(&drvdata->spinlock); 940 return size; 941 } 942 static DEVICE_ATTR_RW(addr_single); 943 944 static ssize_t addr_range_show(struct device *dev, 945 struct device_attribute *attr, 946 char *buf) 947 { 948 u8 idx; 949 unsigned long val1, val2; 950 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 951 struct etmv4_config *config = &drvdata->config; 952 953 spin_lock(&drvdata->spinlock); 954 idx = config->addr_idx; 955 if (idx % 2 != 0) { 956 spin_unlock(&drvdata->spinlock); 957 return -EPERM; 958 } 959 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && 960 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || 961 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && 962 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { 963 spin_unlock(&drvdata->spinlock); 964 return -EPERM; 965 } 966 967 val1 = (unsigned long)config->addr_val[idx]; 968 val2 = (unsigned long)config->addr_val[idx + 1]; 969 spin_unlock(&drvdata->spinlock); 970 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); 971 } 972 973 static ssize_t addr_range_store(struct device *dev, 974 struct device_attribute *attr, 975 const char *buf, size_t size) 976 { 977 u8 idx; 978 unsigned long val1, val2; 979 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 980 struct etmv4_config *config = &drvdata->config; 981 int elements, exclude; 982 983 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude); 984 985 /* exclude is optional, but need at least two parameter */ 986 if (elements < 2) 987 return -EINVAL; 988 /* lower address comparator cannot have a higher address value */ 989 if (val1 > val2) 990 return -EINVAL; 991 992 spin_lock(&drvdata->spinlock); 993 idx = config->addr_idx; 994 if (idx % 2 != 0) { 995 spin_unlock(&drvdata->spinlock); 996 return -EPERM; 997 } 998 999 if (!((config->addr_type[idx] == 
		      ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 * use supplied value, or default to bit set in 'mode'
	 */
	if (elements != 3)
		exclude = config->mode & ETM_MODE_EXCLUDE;
	etm4_set_mode_exclude(drvdata, exclude ? true : false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);

/*
 * Show the address value of the comparator selected by config->addr_idx,
 * provided that comparator is unused or already configured as a START one.
 */
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	/* Reject if the selected comparator is used for something else. */
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/*
 * Program the comparator selected by config->addr_idx as a trace-start
 * address and flag it in the ViewInst start/stop shadow (vissctlr).
 */
static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	/* start-address select bits occupy the low half of vissctlr */
	config->vissctlr |= BIT(idx);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

/*
 * Show the address value of the comparator selected by config->addr_idx,
 * provided that comparator is unused or already configured as a STOP one.
 */
static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/*
 * Program the comparator selected by config->addr_idx as a trace-stop
 * address and flag it in the ViewInst start/stop shadow (vissctlr).
 */
static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	/* stop-address select bits occupy the upper half of vissctlr */
	config->vissctlr |= BIT(idx + 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);

/*
 * Report the context-type field (none/ctxid/vmid/all) of the address
 * comparator access-control shadow for the selected comparator.
 */
static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

/*
 * Select which context comparison (none/ctxid/vmid/all) the address
 * comparator performs.  Requests are silently ignored when the matching
 * comparator resource (numcidc/numvmidc) is not implemented.
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);

/*
 * Report which context ID comparator the selected address comparator
 * is tied to (access-control shadow, bits[6:4]).
 */
static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/*
 * Select the context ID comparator used by the selected address
 * comparator.  Valid only when more than one ctxid or vmid comparator
 * is implemented and the index is in range.
 */
static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	/* bound by the larger of the two comparator counts */
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);

/*
 * Report the exception-level filter field (bits[14:8]) of the selected
 * address comparator's access-control shadow.
 */
static ssize_t addr_exlevel_s_ns_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 8, 14);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/*
 * Program the secure/non-secure exception-level filter of the selected
 * address comparator.  Input is the raw 7-bit field value.
 */
static ssize_t addr_exlevel_s_ns_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 0, &val))
		return -EINVAL;

	/* reject values wider than the 7-bit field */
	if (val & ~((GENMASK(14, 8) >> 8)))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
	config->addr_acc[idx] &= ~(GENMASK(14, 8));
	config->addr_acc[idx] |= (val << 8);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_exlevel_s_ns);

/* Printable names indexed by the ETM_ADDR_TYPE_* values. */
static const char * const addr_type_names[] = {
	"unused",
	"single",
	"range",
	"start",
	"stop"
};

/*
 * One-line human-readable summary of the comparator selected by
 * config->addr_idx: type, value(s), include/exclude and control bits.
 * For a RANGE the even/odd comparator pair is normalised to the even idx.
 */
static ssize_t addr_cmp_view_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u8 idx, addr_type;
	unsigned long addr_v, addr_v2, addr_ctrl;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int size = 0;
	bool exclude = false;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	addr_v = config->addr_val[idx];
	addr_ctrl = config->addr_acc[idx];
	addr_type = config->addr_type[idx];
	if (addr_type == ETM_ADDR_TYPE_RANGE) {
		if (idx & 0x1) {
			/* odd idx: report the pair starting at idx - 1 */
			idx -= 1;
			addr_v2 = addr_v;
			addr_v = config->addr_val[idx];
		} else {
			addr_v2 = config->addr_val[idx + 1];
		}
		/* exclude bits live in the upper half of viiectlr */
		exclude = config->viiectlr & BIT(idx / 2 + 16);
	}
	spin_unlock(&drvdata->spinlock);
	if (addr_type) {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
				 addr_type_names[addr_type], addr_v);
		if (addr_type == ETM_ADDR_TYPE_RANGE) {
			size += scnprintf(buf + size, PAGE_SIZE - size,
					  " %#lx %s", addr_v2,
					  exclude ? "exclude" : "include");
		}
		size += scnprintf(buf + size, PAGE_SIZE - size,
				  " ctrl(%#lx)\n", addr_ctrl);
	} else {
		size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
	}
	return size;
}
static DEVICE_ATTR_RO(addr_cmp_view);

/* Report the ViewInst PE comparator start/stop control shadow. */
static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (!drvdata->nr_pe_cmp)
		return -EINVAL;
	val = config->vipcssctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the ViewInst PE comparator start/stop control shadow. */
static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->nr_pe_cmp)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vipcssctlr = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);

/* Report which sequencer state the seq_* attributes operate on. */
static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/*
 * Select the sequencer state the seq_* attributes operate on.
 * NOTE(review): 'nrseqstate - 1' assumes nrseqstate >= 1 when the
 * sequencer exists - confirm the probe path guarantees this.
 */
static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

/* Report the current sequencer state shadow. */
static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Set the sequencer state shadow, bounded by nrseqstate. */
static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

/* Report the event control shadow of the selected sequencer state. */
static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the event control shadow of the selected sequencer state. */
static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* Seq control has two masks B[15:8] F[7:0] */
	config->seq_ctrl[idx] = val & 0xFFFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

/* Report the sequencer reset event shadow. */
static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the sequencer reset event; only valid if a sequencer exists. */
static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);

/* Report which counter the cntr_* attributes operate on. */
static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Select the counter the cntr_* attributes operate on. */
static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

/* Report the reload value shadow of the selected counter. */
static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the reload value of the selected counter (<= ETM_CNTR_MAX_VAL). */
static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

/* Report the value shadow of the selected counter. */
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the value of the selected counter (<= ETM_CNTR_MAX_VAL). */
static ssize_t cntr_val_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

/* Report the control shadow of the selected counter. */
static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the control shadow of the selected counter. */
static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);

/* Report which resource selector the res_ctrl attribute operates on. */
static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Select the resource selector the res_ctrl attribute operates on. */
static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/*
	 * Resource selector pair 0 is always implemented and reserved,
	 * namely an idx with 0 and 1 is illegal.
	 */
	if ((val < 2) || (val >= 2 * drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

/* Report the control shadow of the selected resource selector. */
static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/*
 * Program the control shadow of the selected resource selector,
 * masking out bits above [21] and PAIRINV for odd selectors.
 */
static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val & GENMASK(21, 0);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);

/* Report which single-shot comparator the sshot_* attributes operate on. */
static ssize_t sshot_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata =
dev_get_drvdata(dev->parent); 1736 struct etmv4_config *config = &drvdata->config; 1737 1738 val = config->ss_idx; 1739 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 1740 } 1741 1742 static ssize_t sshot_idx_store(struct device *dev, 1743 struct device_attribute *attr, 1744 const char *buf, size_t size) 1745 { 1746 unsigned long val; 1747 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1748 struct etmv4_config *config = &drvdata->config; 1749 1750 if (kstrtoul(buf, 16, &val)) 1751 return -EINVAL; 1752 if (val >= drvdata->nr_ss_cmp) 1753 return -EINVAL; 1754 1755 spin_lock(&drvdata->spinlock); 1756 config->ss_idx = val; 1757 spin_unlock(&drvdata->spinlock); 1758 return size; 1759 } 1760 static DEVICE_ATTR_RW(sshot_idx); 1761 1762 static ssize_t sshot_ctrl_show(struct device *dev, 1763 struct device_attribute *attr, 1764 char *buf) 1765 { 1766 unsigned long val; 1767 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1768 struct etmv4_config *config = &drvdata->config; 1769 1770 spin_lock(&drvdata->spinlock); 1771 val = config->ss_ctrl[config->ss_idx]; 1772 spin_unlock(&drvdata->spinlock); 1773 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 1774 } 1775 1776 static ssize_t sshot_ctrl_store(struct device *dev, 1777 struct device_attribute *attr, 1778 const char *buf, size_t size) 1779 { 1780 u8 idx; 1781 unsigned long val; 1782 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1783 struct etmv4_config *config = &drvdata->config; 1784 1785 if (kstrtoul(buf, 16, &val)) 1786 return -EINVAL; 1787 1788 spin_lock(&drvdata->spinlock); 1789 idx = config->ss_idx; 1790 config->ss_ctrl[idx] = val & GENMASK(24, 0); 1791 /* must clear bit 31 in related status register on programming */ 1792 config->ss_status[idx] &= ~BIT(31); 1793 spin_unlock(&drvdata->spinlock); 1794 return size; 1795 } 1796 static DEVICE_ATTR_RW(sshot_ctrl); 1797 1798 static ssize_t sshot_status_show(struct device *dev, 1799 struct device_attribute *attr, char *buf) 1800 
{ 1801 unsigned long val; 1802 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1803 struct etmv4_config *config = &drvdata->config; 1804 1805 spin_lock(&drvdata->spinlock); 1806 val = config->ss_status[config->ss_idx]; 1807 spin_unlock(&drvdata->spinlock); 1808 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 1809 } 1810 static DEVICE_ATTR_RO(sshot_status); 1811 1812 static ssize_t sshot_pe_ctrl_show(struct device *dev, 1813 struct device_attribute *attr, 1814 char *buf) 1815 { 1816 unsigned long val; 1817 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1818 struct etmv4_config *config = &drvdata->config; 1819 1820 spin_lock(&drvdata->spinlock); 1821 val = config->ss_pe_cmp[config->ss_idx]; 1822 spin_unlock(&drvdata->spinlock); 1823 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 1824 } 1825 1826 static ssize_t sshot_pe_ctrl_store(struct device *dev, 1827 struct device_attribute *attr, 1828 const char *buf, size_t size) 1829 { 1830 u8 idx; 1831 unsigned long val; 1832 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1833 struct etmv4_config *config = &drvdata->config; 1834 1835 if (kstrtoul(buf, 16, &val)) 1836 return -EINVAL; 1837 1838 spin_lock(&drvdata->spinlock); 1839 idx = config->ss_idx; 1840 config->ss_pe_cmp[idx] = val & GENMASK(7, 0); 1841 /* must clear bit 31 in related status register on programming */ 1842 config->ss_status[idx] &= ~BIT(31); 1843 spin_unlock(&drvdata->spinlock); 1844 return size; 1845 } 1846 static DEVICE_ATTR_RW(sshot_pe_ctrl); 1847 1848 static ssize_t ctxid_idx_show(struct device *dev, 1849 struct device_attribute *attr, 1850 char *buf) 1851 { 1852 unsigned long val; 1853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1854 struct etmv4_config *config = &drvdata->config; 1855 1856 val = config->ctxid_idx; 1857 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 1858 } 1859 1860 static ssize_t ctxid_idx_store(struct device *dev, 1861 struct device_attribute *attr, 1862 const char 
*buf, size_t size) 1863 { 1864 unsigned long val; 1865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1866 struct etmv4_config *config = &drvdata->config; 1867 1868 if (kstrtoul(buf, 16, &val)) 1869 return -EINVAL; 1870 if (val >= drvdata->numcidc) 1871 return -EINVAL; 1872 1873 /* 1874 * Use spinlock to ensure index doesn't change while it gets 1875 * dereferenced multiple times within a spinlock block elsewhere. 1876 */ 1877 spin_lock(&drvdata->spinlock); 1878 config->ctxid_idx = val; 1879 spin_unlock(&drvdata->spinlock); 1880 return size; 1881 } 1882 static DEVICE_ATTR_RW(ctxid_idx); 1883 1884 static ssize_t ctxid_pid_show(struct device *dev, 1885 struct device_attribute *attr, 1886 char *buf) 1887 { 1888 u8 idx; 1889 unsigned long val; 1890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1891 struct etmv4_config *config = &drvdata->config; 1892 1893 /* 1894 * Don't use contextID tracing if coming from a PID namespace. See 1895 * comment in ctxid_pid_store(). 1896 */ 1897 if (task_active_pid_ns(current) != &init_pid_ns) 1898 return -EINVAL; 1899 1900 spin_lock(&drvdata->spinlock); 1901 idx = config->ctxid_idx; 1902 val = (unsigned long)config->ctxid_pid[idx]; 1903 spin_unlock(&drvdata->spinlock); 1904 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 1905 } 1906 1907 static ssize_t ctxid_pid_store(struct device *dev, 1908 struct device_attribute *attr, 1909 const char *buf, size_t size) 1910 { 1911 u8 idx; 1912 unsigned long pid; 1913 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1914 struct etmv4_config *config = &drvdata->config; 1915 1916 /* 1917 * When contextID tracing is enabled the tracers will insert the 1918 * value found in the contextID register in the trace stream. But if 1919 * a process is in a namespace the PID of that process as seen from the 1920 * namespace won't be what the kernel sees, something that makes the 1921 * feature confusing and can potentially leak kernel only information. 
1922 * As such refuse to use the feature if @current is not in the initial 1923 * PID namespace. 1924 */ 1925 if (task_active_pid_ns(current) != &init_pid_ns) 1926 return -EINVAL; 1927 1928 /* 1929 * only implemented when ctxid tracing is enabled, i.e. at least one 1930 * ctxid comparator is implemented and ctxid is greater than 0 bits 1931 * in length 1932 */ 1933 if (!drvdata->ctxid_size || !drvdata->numcidc) 1934 return -EINVAL; 1935 if (kstrtoul(buf, 16, &pid)) 1936 return -EINVAL; 1937 1938 spin_lock(&drvdata->spinlock); 1939 idx = config->ctxid_idx; 1940 config->ctxid_pid[idx] = (u64)pid; 1941 spin_unlock(&drvdata->spinlock); 1942 return size; 1943 } 1944 static DEVICE_ATTR_RW(ctxid_pid); 1945 1946 static ssize_t ctxid_masks_show(struct device *dev, 1947 struct device_attribute *attr, 1948 char *buf) 1949 { 1950 unsigned long val1, val2; 1951 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1952 struct etmv4_config *config = &drvdata->config; 1953 1954 /* 1955 * Don't use contextID tracing if coming from a PID namespace. See 1956 * comment in ctxid_pid_store(). 1957 */ 1958 if (task_active_pid_ns(current) != &init_pid_ns) 1959 return -EINVAL; 1960 1961 spin_lock(&drvdata->spinlock); 1962 val1 = config->ctxid_mask0; 1963 val2 = config->ctxid_mask1; 1964 spin_unlock(&drvdata->spinlock); 1965 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); 1966 } 1967 1968 static ssize_t ctxid_masks_store(struct device *dev, 1969 struct device_attribute *attr, 1970 const char *buf, size_t size) 1971 { 1972 u8 i, j, maskbyte; 1973 unsigned long val1, val2, mask; 1974 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 1975 struct etmv4_config *config = &drvdata->config; 1976 int nr_inputs; 1977 1978 /* 1979 * Don't use contextID tracing if coming from a PID namespace. See 1980 * comment in ctxid_pid_store(). 
1981 */ 1982 if (task_active_pid_ns(current) != &init_pid_ns) 1983 return -EINVAL; 1984 1985 /* 1986 * only implemented when ctxid tracing is enabled, i.e. at least one 1987 * ctxid comparator is implemented and ctxid is greater than 0 bits 1988 * in length 1989 */ 1990 if (!drvdata->ctxid_size || !drvdata->numcidc) 1991 return -EINVAL; 1992 /* one mask if <= 4 comparators, two for up to 8 */ 1993 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2); 1994 if ((drvdata->numcidc > 4) && (nr_inputs != 2)) 1995 return -EINVAL; 1996 1997 spin_lock(&drvdata->spinlock); 1998 /* 1999 * each byte[0..3] controls mask value applied to ctxid 2000 * comparator[0..3] 2001 */ 2002 switch (drvdata->numcidc) { 2003 case 0x1: 2004 /* COMP0, bits[7:0] */ 2005 config->ctxid_mask0 = val1 & 0xFF; 2006 break; 2007 case 0x2: 2008 /* COMP1, bits[15:8] */ 2009 config->ctxid_mask0 = val1 & 0xFFFF; 2010 break; 2011 case 0x3: 2012 /* COMP2, bits[23:16] */ 2013 config->ctxid_mask0 = val1 & 0xFFFFFF; 2014 break; 2015 case 0x4: 2016 /* COMP3, bits[31:24] */ 2017 config->ctxid_mask0 = val1; 2018 break; 2019 case 0x5: 2020 /* COMP4, bits[7:0] */ 2021 config->ctxid_mask0 = val1; 2022 config->ctxid_mask1 = val2 & 0xFF; 2023 break; 2024 case 0x6: 2025 /* COMP5, bits[15:8] */ 2026 config->ctxid_mask0 = val1; 2027 config->ctxid_mask1 = val2 & 0xFFFF; 2028 break; 2029 case 0x7: 2030 /* COMP6, bits[23:16] */ 2031 config->ctxid_mask0 = val1; 2032 config->ctxid_mask1 = val2 & 0xFFFFFF; 2033 break; 2034 case 0x8: 2035 /* COMP7, bits[31:24] */ 2036 config->ctxid_mask0 = val1; 2037 config->ctxid_mask1 = val2; 2038 break; 2039 default: 2040 break; 2041 } 2042 /* 2043 * If software sets a mask bit to 1, it must program relevant byte 2044 * of ctxid comparator value 0x0, otherwise behavior is unpredictable. 2045 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24] 2046 * of ctxid comparator0 value (corresponding to byte 0) register. 
2047 */ 2048 mask = config->ctxid_mask0; 2049 for (i = 0; i < drvdata->numcidc; i++) { 2050 /* mask value of corresponding ctxid comparator */ 2051 maskbyte = mask & ETMv4_EVENT_MASK; 2052 /* 2053 * each bit corresponds to a byte of respective ctxid comparator 2054 * value register 2055 */ 2056 for (j = 0; j < 8; j++) { 2057 if (maskbyte & 1) 2058 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8)); 2059 maskbyte >>= 1; 2060 } 2061 /* Select the next ctxid comparator mask value */ 2062 if (i == 3) 2063 /* ctxid comparators[4-7] */ 2064 mask = config->ctxid_mask1; 2065 else 2066 mask >>= 0x8; 2067 } 2068 2069 spin_unlock(&drvdata->spinlock); 2070 return size; 2071 } 2072 static DEVICE_ATTR_RW(ctxid_masks); 2073 2074 static ssize_t vmid_idx_show(struct device *dev, 2075 struct device_attribute *attr, 2076 char *buf) 2077 { 2078 unsigned long val; 2079 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 2080 struct etmv4_config *config = &drvdata->config; 2081 2082 val = config->vmid_idx; 2083 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 2084 } 2085 2086 static ssize_t vmid_idx_store(struct device *dev, 2087 struct device_attribute *attr, 2088 const char *buf, size_t size) 2089 { 2090 unsigned long val; 2091 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 2092 struct etmv4_config *config = &drvdata->config; 2093 2094 if (kstrtoul(buf, 16, &val)) 2095 return -EINVAL; 2096 if (val >= drvdata->numvmidc) 2097 return -EINVAL; 2098 2099 /* 2100 * Use spinlock to ensure index doesn't change while it gets 2101 * dereferenced multiple times within a spinlock block elsewhere. 
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

/* Report the VMID programmed in the selected VMID comparator. */
static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val = (unsigned long)config->vmid_val[config->vmid_idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

/* Program the VMID matched by the selected VMID comparator. */
static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);

/* Report both VMID comparator mask shadows. */
static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

/*
 * Program the VMID comparator masks.  One hex value is expected when up
 * to 4 comparators are implemented, two values for up to 8.  Comparator
 * value bytes covered by a set mask bit are cleared.
 */
static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;
	int nr_inputs;

	/*
	 * Don't use virtual contextID tracing if coming from a PID namespace.
	 * See comment in ctxid_pid_store().
	 */
	if (!task_is_in_init_pid_ns(current))
		return -EINVAL;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* one mask if <= 4 comparators, two for up to 8 */
	nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
	if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * of vmid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);

/* Report the CPU this trace unit is affined to. */
static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);

}
static DEVICE_ATTR_RO(cpu);

/* sysfs attributes exposed for the ETMv4 device (continues past this view) */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
&dev_attr_addr_exlevel_s_ns.attr, 2332 &dev_attr_addr_cmp_view.attr, 2333 &dev_attr_vinst_pe_cmp_start_stop.attr, 2334 &dev_attr_sshot_idx.attr, 2335 &dev_attr_sshot_ctrl.attr, 2336 &dev_attr_sshot_pe_ctrl.attr, 2337 &dev_attr_sshot_status.attr, 2338 &dev_attr_seq_idx.attr, 2339 &dev_attr_seq_state.attr, 2340 &dev_attr_seq_event.attr, 2341 &dev_attr_seq_reset_event.attr, 2342 &dev_attr_cntr_idx.attr, 2343 &dev_attr_cntrldvr.attr, 2344 &dev_attr_cntr_val.attr, 2345 &dev_attr_cntr_ctrl.attr, 2346 &dev_attr_res_idx.attr, 2347 &dev_attr_res_ctrl.attr, 2348 &dev_attr_ctxid_idx.attr, 2349 &dev_attr_ctxid_pid.attr, 2350 &dev_attr_ctxid_masks.attr, 2351 &dev_attr_vmid_idx.attr, 2352 &dev_attr_vmid_val.attr, 2353 &dev_attr_vmid_masks.attr, 2354 &dev_attr_cpu.attr, 2355 NULL, 2356 }; 2357 2358 struct etmv4_reg { 2359 struct coresight_device *csdev; 2360 u32 offset; 2361 u32 data; 2362 }; 2363 2364 static void do_smp_cross_read(void *data) 2365 { 2366 struct etmv4_reg *reg = data; 2367 2368 reg->data = etm4x_relaxed_read32(®->csdev->access, reg->offset); 2369 } 2370 2371 static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset) 2372 { 2373 struct etmv4_reg reg; 2374 2375 reg.offset = offset; 2376 reg.csdev = drvdata->csdev; 2377 2378 /* 2379 * smp cross call ensures the CPU will be powered up before 2380 * accessing the ETMv4 trace core registers 2381 */ 2382 smp_call_function_single(drvdata->cpu, do_smp_cross_read, ®, 1); 2383 return reg.data; 2384 } 2385 2386 static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr) 2387 { 2388 struct dev_ext_attribute *eattr; 2389 2390 eattr = container_of(attr, struct dev_ext_attribute, attr); 2391 return (u32)(unsigned long)eattr->var; 2392 } 2393 2394 static ssize_t coresight_etm4x_reg_show(struct device *dev, 2395 struct device_attribute *d_attr, 2396 char *buf) 2397 { 2398 u32 val, offset; 2399 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 2400 2401 offset = 
coresight_etm4x_attr_to_offset(d_attr); 2402 2403 pm_runtime_get_sync(dev->parent); 2404 val = etmv4_cross_read(drvdata, offset); 2405 pm_runtime_put_sync(dev->parent); 2406 2407 return scnprintf(buf, PAGE_SIZE, "0x%x\n", val); 2408 } 2409 2410 static inline bool 2411 etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset) 2412 { 2413 switch (offset) { 2414 ETM_COMMON_SYSREG_LIST_CASES 2415 /* 2416 * Common registers to ETE & ETM4x accessible via system 2417 * instructions are always implemented. 2418 */ 2419 return true; 2420 2421 ETM4x_ONLY_SYSREG_LIST_CASES 2422 /* 2423 * We only support etm4x and ete. So if the device is not 2424 * ETE, it must be ETMv4x. 2425 */ 2426 return !etm4x_is_ete(drvdata); 2427 2428 ETM4x_MMAP_LIST_CASES 2429 /* 2430 * Registers accessible only via memory-mapped registers 2431 * must not be accessed via system instructions. 2432 * We cannot access the drvdata->csdev here, as this 2433 * function is called during the device creation, via 2434 * coresight_register() and the csdev is not initialized 2435 * until that is done. So rely on the drvdata->base to 2436 * detect if we have a memory mapped access. 2437 * Also ETE doesn't implement memory mapped access, thus 2438 * it is sufficient to check that we are using mmio. 2439 */ 2440 return !!drvdata->base; 2441 2442 ETE_ONLY_SYSREG_LIST_CASES 2443 return etm4x_is_ete(drvdata); 2444 } 2445 2446 return false; 2447 } 2448 2449 /* 2450 * Hide the ETM4x registers that may not be available on the 2451 * hardware. 2452 * There are certain management registers unavailable via system 2453 * instructions. Make those sysfs attributes hidden on such 2454 * systems. 
2455 */ 2456 static umode_t 2457 coresight_etm4x_attr_reg_implemented(struct kobject *kobj, 2458 struct attribute *attr, int unused) 2459 { 2460 struct device *dev = kobj_to_dev(kobj); 2461 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 2462 struct device_attribute *d_attr; 2463 u32 offset; 2464 2465 d_attr = container_of(attr, struct device_attribute, attr); 2466 offset = coresight_etm4x_attr_to_offset(d_attr); 2467 2468 if (etm4x_register_implemented(drvdata, offset)) 2469 return attr->mode; 2470 return 0; 2471 } 2472 2473 #define coresight_etm4x_reg(name, offset) \ 2474 &((struct dev_ext_attribute[]) { \ 2475 { \ 2476 __ATTR(name, 0444, coresight_etm4x_reg_show, NULL), \ 2477 (void *)(unsigned long)offset \ 2478 } \ 2479 })[0].attr.attr 2480 2481 static struct attribute *coresight_etmv4_mgmt_attrs[] = { 2482 coresight_etm4x_reg(trcpdcr, TRCPDCR), 2483 coresight_etm4x_reg(trcpdsr, TRCPDSR), 2484 coresight_etm4x_reg(trclsr, TRCLSR), 2485 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS), 2486 coresight_etm4x_reg(trcdevid, TRCDEVID), 2487 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE), 2488 coresight_etm4x_reg(trcpidr0, TRCPIDR0), 2489 coresight_etm4x_reg(trcpidr1, TRCPIDR1), 2490 coresight_etm4x_reg(trcpidr2, TRCPIDR2), 2491 coresight_etm4x_reg(trcpidr3, TRCPIDR3), 2492 coresight_etm4x_reg(trcoslsr, TRCOSLSR), 2493 coresight_etm4x_reg(trcconfig, TRCCONFIGR), 2494 coresight_etm4x_reg(trctraceid, TRCTRACEIDR), 2495 coresight_etm4x_reg(trcdevarch, TRCDEVARCH), 2496 NULL, 2497 }; 2498 2499 static struct attribute *coresight_etmv4_trcidr_attrs[] = { 2500 coresight_etm4x_reg(trcidr0, TRCIDR0), 2501 coresight_etm4x_reg(trcidr1, TRCIDR1), 2502 coresight_etm4x_reg(trcidr2, TRCIDR2), 2503 coresight_etm4x_reg(trcidr3, TRCIDR3), 2504 coresight_etm4x_reg(trcidr4, TRCIDR4), 2505 coresight_etm4x_reg(trcidr5, TRCIDR5), 2506 /* trcidr[6,7] are reserved */ 2507 coresight_etm4x_reg(trcidr8, TRCIDR8), 2508 coresight_etm4x_reg(trcidr9, TRCIDR9), 2509 
coresight_etm4x_reg(trcidr10, TRCIDR10), 2510 coresight_etm4x_reg(trcidr11, TRCIDR11), 2511 coresight_etm4x_reg(trcidr12, TRCIDR12), 2512 coresight_etm4x_reg(trcidr13, TRCIDR13), 2513 NULL, 2514 }; 2515 2516 static const struct attribute_group coresight_etmv4_group = { 2517 .attrs = coresight_etmv4_attrs, 2518 }; 2519 2520 static const struct attribute_group coresight_etmv4_mgmt_group = { 2521 .is_visible = coresight_etm4x_attr_reg_implemented, 2522 .attrs = coresight_etmv4_mgmt_attrs, 2523 .name = "mgmt", 2524 }; 2525 2526 static const struct attribute_group coresight_etmv4_trcidr_group = { 2527 .attrs = coresight_etmv4_trcidr_attrs, 2528 .name = "trcidr", 2529 }; 2530 2531 const struct attribute_group *coresight_etmv4_groups[] = { 2532 &coresight_etmv4_group, 2533 &coresight_etmv4_mgmt_group, 2534 &coresight_etmv4_trcidr_group, 2535 NULL, 2536 }; 2537