// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
#include "coresight-priv.h"

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ctxid_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(dev->parent);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(dev->parent);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		memset(config, 0, sizeof(struct etm_config));
		config->mode = ETM_MODE_EXCLUDE;
		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
		for (i = 0; i < drvdata->nr_addr_cmp; i++)
			config->addr_type[i] = ETM_ADDR_TYPE_NONE;

		etm_set_default(config);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->mode;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETM_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (config->mode & ETM_MODE_CYCACC)
		config->ctrl |= ETMCR_CYC_ACC;
	else
		config->ctrl &= ~ETMCR_CYC_ACC;

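	/*
	 * Stall and timestamp modes are optional features: honour them only
	 * when the corresponding capability bits are set in ETMCCR/ETMCCER,
	 * otherwise reject the requested mode.
	 */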
	if (config->mode & ETM_MODE_STALL) {
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_STALL_MODE;
	} else
		config->ctrl &= ~ETMCR_STALL_MODE;

	if (config->mode & ETM_MODE_TIMESTAMP) {
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_TIMESTAMP_EN;
	} else
		config->ctrl &= ~ETMCR_TIMESTAMP_EN;

	if (config->mode & ETM_MODE_CTXID)
		config->ctrl |= ETMCR_CTXID_SIZE;
	else
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	if (config->mode & ETM_MODE_BBROAD)
		config->ctrl |= ETMCR_BRANCH_BROADCAST;
	else
		config->ctrl &= ~ETMCR_BRANCH_BROADCAST;

	if (config->mode & ETM_MODE_RET_STACK)
		config->ctrl |= ETMCR_RETURN_STACK;
	else
		config->ctrl &= ~ETMCR_RETURN_STACK;

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}
static DEVICE_ATTR_RW(mode);

static ssize_t trigger_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->trigger_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->trigger_event = val & ETM_EVENT_MASK;

	return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->enable_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->enable_event = val & ETM_EVENT_MASK;

	return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->fifofull_level;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->fifofull_level = val;

	return size;
}
static DEVICE_ATTR_RW(fifofull_level);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->addr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = config->addr_val[idx];
	val2 = config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

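/*
 * An address range occupies an even/odd comparator pair: the pair selected
 * by addr_idx must be even-numbered and either unused or already configured
 * as a range, and the low address must not exceed the high address.
 */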
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	config->enable_ctrl1 |= (1 << (idx / 2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->startstop_ctrl |= (1 << idx);
	config->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

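/*
 * Start and stop addresses share the comparator pool: a start address sets
 * bit 'idx' and a stop address sets bit 'idx + 16' in startstop_ctrl, and
 * both turn on start/stop filtering via ETMTECR1_START_STOP.
 */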
static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->startstop_ctrl |= (1 << (idx + 16));
	config->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_acctype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->addr_acctype[config->addr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->addr_acctype[config->addr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_acctype);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_cntr)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntr_rld_val_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_val[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);

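/*
 * Report live counter values (ETMCNTVRn) when the ETM is enabled, otherwise
 * fall back to the cached configuration values.
 */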
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/* Append one line per counter at the current offset in the buffer. */
	if (!local_read(&drvdata->mode)) {
		spin_lock(&drvdata->spinlock);
		for (i = 0; i < drvdata->nr_cntr; i++)
			ret += sprintf(buf + ret, "counter %d: %x\n",
				       i, config->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
	}

	return ret;
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t seq_12_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);

static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		val = config->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(dev->parent);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(dev->parent);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	config->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val = config->ctxid_pid[config->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * When contextID tracing is enabled the tracers will insert the
	 * value found in the contextID register in the trace stream. But if
	 * a process is in a namespace the PID of that process as seen from the
	 * namespace won't be what the kernel sees, something that makes the
	 * feature confusing and can potentially leak kernel only information.
	 * As such refuse to use the feature if @current is not in the initial
	 * PID namespace.
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	ret = kstrtoul(buf, 16, &pid);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->ctxid_pid[config->ctxid_idx] = pid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_mask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	val = config->ctxid_mask;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->ctxid_mask = val;
	return size;
}
static DEVICE_ATTR_RW(ctxid_mask);

static ssize_t sync_freq_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = etm_get_trace_id(drvdata);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);

static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};

static struct attribute *coresight_etm_mgmt_attrs[] = {
	coresight_simple_reg32(etmccr, ETMCCR),
	coresight_simple_reg32(etmccer, ETMCCER),
	coresight_simple_reg32(etmscr, ETMSCR),
	coresight_simple_reg32(etmidr, ETMIDR),
	coresight_simple_reg32(etmcr, ETMCR),
	coresight_simple_reg32(etmtraceidr, ETMTRACEIDR),
	coresight_simple_reg32(etmteevr, ETMTEEVR),
	coresight_simple_reg32(etmtssvr, ETMTSSCR),
	coresight_simple_reg32(etmtecr1, ETMTECR1),
	coresight_simple_reg32(etmtecr2, ETMTECR2),
	NULL,
};

static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};

static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};

const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};