// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Program Flow Trace driver
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/perf_event.h>
#include <asm/sections.h>

#include "coresight-etm.h"
#include "coresight-etm-perf.h"

/*
 * Not really modular but using module_param is the easiest way to
 * remain consistent with existing use cases for now.
 */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

static struct etm_drvdata *etmdrvdata[NR_CPUS];

static enum cpuhp_state hp_online;

/*
 * Memory mapped writes to clear os lock are not supported on some processors
 * and OS lock must be unlocked before any memory mapped access on such
 * processors, otherwise memory mapped reads/writes will be invalid.
 */
static void etm_os_unlock(struct etm_drvdata *drvdata)
{
	/* Writing any value to ETMOSLAR unlocks the trace registers */
	etm_writel(drvdata, 0x0, ETMOSLAR);
	drvdata->os_unlock = true;
	isb();
}

static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}

static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
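
/*
 * Ordering note on the helpers above: ETMPDCR is always memory mapped
 * while the other trace registers may be reached through cp14, hence the
 * explicit mb()/isb() pairs. The barriers come *after* powering a block
 * up (so subsequent cp14 accesses see a powered block) and *before*
 * powering it down (so pending cp14 accesses drain first).
 */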

/**
 * coresight_timeout_etm - loop until a bit has changed to a specific state.
 * @drvdata: etm's private data structure.
 * @offset: offset of the register of interest from the ETM base address.
 * @position: the position of the bit of interest.
 * @value: the value the bit should have.
 *
 * Basically the same as coresight_timeout() except for the register access
 * method, where we have to account for CP14 configurations.
 *
 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
 */
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
				 int position, int value)
{
	int i;
	u32 val;

	for (i = TIMEOUT_US; i > 0; i--) {
		val = etm_readl(drvdata, offset);
		/* Waiting on the bit to go from 0 to 1 */
		if (value) {
			if (val & BIT(position))
				return 0;
		/* Waiting on the bit to go from 1 to 0 */
		} else {
			if (!(val & BIT(position)))
				return 0;
		}

		/*
		 * Delay is arbitrary - the specification doesn't say how long
		 * we are expected to wait. Extra check required to make sure
		 * we don't wait needlessly on the last iteration.
		 */
		if (i - 1)
			udelay(1);
	}

	return -EAGAIN;
}

static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
		dev_err(&drvdata->csdev->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}

static void etm_clr_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
		dev_err(&drvdata->csdev->dev,
			"%s: timeout observed when probing at offset %#x\n",
			__func__, ETMSR);
	}
}
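
/*
 * etm_set_prog()/etm_clr_prog() implement the standard ETMv3 programming
 * handshake: flip ETMCR.Prog, then poll ETMSR until the status bit
 * reflects the new state. All configuration writes in this driver take
 * place between etm_set_prog() and etm_clr_prog(), i.e. while the tracer
 * is quiescent.
 */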

void etm_set_default(struct etm_config *config)
{
	int i;

	if (WARN_ON_ONCE(!config))
		return;

	/*
	 * Taken verbatim from the TRM:
	 *
	 * To trace all memory:
	 *  set bit [24] in register 0x009, the ETMTECR1, to 1
	 *  set all other bits in register 0x009, the ETMTECR1, to 0
	 *  set all bits in register 0x007, the ETMTECR2, to 0
	 *  set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
	 */
	config->enable_ctrl1 = BIT(24);
	config->enable_ctrl2 = 0x0;
	config->enable_event = ETM_HARD_WIRE_RES_A;

	config->trigger_event = ETM_DEFAULT_EVENT_VAL;

	config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
	config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
	config->timestamp_event = ETM_DEFAULT_EVENT_VAL;

	for (i = 0; i < ETM_MAX_CNTR; i++) {
		config->cntr_rld_val[i] = 0x0;
		config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
		config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
		config->cntr_val[i] = 0x0;
	}

	config->seq_curr_state = 0x0;
	config->ctxid_idx = 0x0;
	for (i = 0; i < ETM_MAX_CTXID_CMP; i++)
		config->ctxid_pid[i] = 0x0;

	config->ctxid_mask = 0x0;
	/* Setting default to 1024 as per TRM recommendation */
	config->sync_freq = 0x400;
}

void etm_config_trace_mode(struct etm_config *config)
{
	u32 flags, mode;

	mode = config->mode;

	mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);

	/* excluding kernel AND user space doesn't make sense */
	if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		return;

	/* nothing to do if neither flag is set */
	if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
		return;

	flags = (1 << 0 |	/* instruction execute */
		 3 << 3 |	/* ARM instruction */
		 0 << 5 |	/* No data value comparison */
		 0 << 7 |	/* No exact match */
		 0 << 8);	/* Ignore context ID */

	/* No need to worry about single address comparators. */
	config->enable_ctrl2 = 0x0;

	/* Bit 0 is address range comparator 1 */
	config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;

	/*
	 * On ETMv3.5:
	 * ETMACTRn[13,11] == Non-secure state comparison control
	 * ETMACTRn[12,10] == Secure state comparison control
	 *
	 * b00 == Match in all modes in this state
	 * b01 == Do not match in any mode in this state
	 * b10 == Match in all modes except user mode in this state
	 * b11 == Match only in user mode in this state
	 */

	/* Tracing in secure mode is not supported at this time */
	flags |= (0 << 12 | 1 << 10);

	if (mode & ETM_MODE_EXCL_USER) {
		/* exclude user, match all modes except user mode */
		flags |= (1 << 13 | 0 << 11);
	} else {
		/* exclude kernel, match only in user mode */
		flags |= (1 << 13 | 1 << 11);
	}

	/*
	 * The ETMTEEVR register is already set to "hard wire A". As such
	 * all there is to do is setup an address comparator that spans
	 * the entire address range and configure the state and mode bits.
	 */
	config->addr_val[0] = (u32) 0x0;
	config->addr_val[1] = (u32) ~0x0;
	config->addr_acctype[0] = flags;
	config->addr_acctype[1] = flags;
	config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
	config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
}
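
/*
 * Net effect of the above: with ETM_MODE_EXCL_KERN the single full-range
 * address comparator only matches in user mode, so kernel execution is
 * filtered out at the source; with ETM_MODE_EXCL_USER it matches in every
 * non-secure mode except user mode. Secure state never matches in either
 * case.
 */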

#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | \
				 ETMCR_TIMESTAMP_EN | \
				 ETMCR_RETURN_STACK)

static int etm_parse_event_config(struct etm_drvdata *drvdata,
				  struct perf_event *event)
{
	struct etm_config *config = &drvdata->config;
	struct perf_event_attr *attr = &event->attr;

	if (!attr)
		return -EINVAL;

	/* Clear configuration from previous run */
	memset(config, 0, sizeof(struct etm_config));

	if (attr->exclude_kernel)
		config->mode = ETM_MODE_EXCL_KERN;

	if (attr->exclude_user)
		config->mode = ETM_MODE_EXCL_USER;

	/* Always start from the default config */
	etm_set_default(config);

	/*
	 * By default the tracers are configured to trace the whole address
	 * range. Narrow the field only if requested by user space.
	 */
	if (config->mode)
		etm_config_trace_mode(config);

	/*
	 * At this time only cycle accurate, return stack and timestamp
	 * options are available.
	 */
	if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
		return -EINVAL;

	config->ctrl = attr->config;

	/*
	 * Possible to have cores with PTM (supports ret stack) and ETM
	 * (never has ret stack) on the same SoC. So if we have a request
	 * for return stack that can't be honoured on this core then
	 * clear the bit - trace will still continue normally.
	 */
	if ((config->ctrl & ETMCR_RETURN_STACK) &&
	    !(drvdata->etmccer & ETMCCER_RETSTACK))
		config->ctrl &= ~ETMCR_RETURN_STACK;

	return 0;
}
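
/*
 * Note that attr->config carries the ETMCR option bits directly, so a
 * perf session can request cycle accurate tracing and/or timestamps. A
 * hypothetical invocation, assuming the cs_etm PMU exposes matching
 * format strings in sysfs:
 *
 *	perf record -e cs_etm/cycacc,timestamp/u --per-thread ls
 */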

static int etm_enable_hw(struct etm_drvdata *drvdata)
{
	int i, rc;
	u32 etmcr;
	struct etm_config *config = &drvdata->config;

	CS_UNLOCK(drvdata->base);

	rc = coresight_claim_device_unlocked(drvdata->base);
	if (rc)
		goto done;

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	etm_set_prog(drvdata);

	etmcr = etm_readl(drvdata, ETMCR);
	/* Clear setting from a previous run if need be */
	etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
	etmcr |= drvdata->port_size;
	etmcr |= ETMCR_ETM_EN;
	etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, config->enable_event, ETMTEEVR);
	etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, config->fifofull_level, ETMFFLR);
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
	}
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, config->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
	}
	etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, config->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	etm_clr_prog(drvdata);

done:
	CS_LOCK(drvdata->base);

	dev_dbg(&drvdata->csdev->dev, "cpu: %d enable smp call done: %d\n",
		drvdata->cpu, rc);
	return rc;
}

struct etm_enable_arg {
	struct etm_drvdata *drvdata;
	int rc;
};

static void etm_enable_hw_smp_call(void *info)
{
	struct etm_enable_arg *arg = info;

	if (WARN_ON(!arg))
		return;
	arg->rc = etm_enable_hw(arg->drvdata);
}

static int etm_cpu_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return drvdata->cpu;
}

int etm_get_trace_id(struct etm_drvdata *drvdata)
{
	unsigned long flags;
	int trace_id = -1;
	struct device *etm_dev;

	if (!drvdata)
		goto out;

	etm_dev = drvdata->csdev->dev.parent;
	if (!local_read(&drvdata->mode))
		return drvdata->traceid;

	pm_runtime_get_sync(etm_dev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(etm_dev);

out:
	return trace_id;
}

static int etm_trace_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return etm_get_trace_id(drvdata);
}

static int etm_enable_perf(struct coresight_device *csdev,
			   struct perf_event *event)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return -EINVAL;

	/* Configure the tracer based on the session's specifics */
	ret = etm_parse_event_config(drvdata, event);
	if (ret)
		return ret;
	/* And enable it */
	return etm_enable_hw(drvdata);
}
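
/*
 * The ETM registers are per-CPU (and possibly cp14 based), so enabling
 * from sysfs must run on the CPU that owns the tracer. The smp call
 * below carries the return code in struct etm_enable_arg because
 * smp_call_function_single() only reports whether the IPI ran, not what
 * the callee returned.
 */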

static int etm_enable_sysfs(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etm_enable_arg arg = { };
	int ret;

	spin_lock(&drvdata->spinlock);

	/*
	 * Configure the ETM only if the CPU is online. If it isn't online
	 * hw configuration will take place on the local CPU during bring up.
	 */
	if (cpu_online(drvdata->cpu)) {
		arg.drvdata = drvdata;
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw_smp_call, &arg, 1);
		if (!ret)
			ret = arg.rc;
		if (!ret)
			drvdata->sticky_enable = true;
	} else {
		ret = -ENODEV;
	}

	spin_unlock(&drvdata->spinlock);

	if (!ret)
		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
	return ret;
}

static int etm_enable(struct coresight_device *csdev,
		      struct perf_event *event, u32 mode)
{
	int ret;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

	/* Someone is already using the tracer */
	if (val)
		return -EBUSY;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = etm_enable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = etm_enable_perf(csdev, event);
		break;
	default:
		ret = -EINVAL;
	}

	/* The tracer didn't start */
	if (ret)
		local_set(&drvdata->mode, CS_MODE_DISABLED);

	return ret;
}

static void etm_disable_hw(void *info)
{
	int i;
	struct etm_drvdata *drvdata = info;
	struct etm_config *config = &drvdata->config;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Read back sequencer and counters for post trace analysis */
	config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

	for (i = 0; i < drvdata->nr_cntr; i++)
		config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);

	CS_LOCK(drvdata->base);

	dev_dbg(&drvdata->csdev->dev,
		"cpu: %d disable smp call done\n", drvdata->cpu);
}

static void etm_disable_perf(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
		return;

	CS_UNLOCK(drvdata->base);

	/* Setting the prog bit disables tracing immediately */
	etm_set_prog(drvdata);

	/*
	 * There is no way to know when the tracer will be used again so
	 * power down the tracer.
	 */
	etm_set_pwrdwn(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);

	CS_LOCK(drvdata->base);
}

static void etm_disable_sysfs(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking the hotplug lock here protects against clocks getting
	 * disabled with tracing left on (crash scenario) if a user disable
	 * occurs after the cpu online mask indicates the cpu is offline but
	 * before the DYING hotplug callback is serviced by the ETM driver.
	 */
	cpus_read_lock();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);

	spin_unlock(&drvdata->spinlock);
	cpus_read_unlock();

	dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
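
/*
 * drvdata->mode is the single ownership gate for the tracer: it moves
 * from CS_MODE_DISABLED to CS_MODE_SYSFS or CS_MODE_PERF via the
 * local_cmpxchg() in etm_enable(), and back to CS_MODE_DISABLED in
 * etm_disable() below. Because only the current owner can disable the
 * tracer, the mode can be read there without further locking.
 */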

static void etm_disable(struct coresight_device *csdev,
			struct perf_event *event)
{
	u32 mode;
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * For as long as the tracer isn't disabled another entity can't
	 * change its status. As such we can read the status here without
	 * fearing it will change under us.
	 */
	mode = local_read(&drvdata->mode);

	switch (mode) {
	case CS_MODE_DISABLED:
		break;
	case CS_MODE_SYSFS:
		etm_disable_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		etm_disable_perf(csdev);
		break;
	default:
		WARN_ON_ONCE(mode);
		return;
	}

	if (mode)
		local_set(&drvdata->mode, CS_MODE_DISABLED);
}

static const struct coresight_ops_source etm_source_ops = {
	.cpu_id		= etm_cpu_id,
	.trace_id	= etm_trace_id,
	.enable		= etm_enable,
	.disable	= etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
	.source_ops	= &etm_source_ops,
};

static int etm_online_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
		coresight_enable(etmdrvdata[cpu]->csdev);
	return 0;
}

static int etm_starting_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	/* etm_os_unlock() sets drvdata->os_unlock for us */
	if (!etmdrvdata[cpu]->os_unlock)
		etm_os_unlock(etmdrvdata[cpu]);

	if (local_read(&etmdrvdata[cpu]->mode))
		etm_enable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

static int etm_dying_cpu(unsigned int cpu)
{
	if (!etmdrvdata[cpu])
		return 0;

	spin_lock(&etmdrvdata[cpu]->spinlock);
	if (local_read(&etmdrvdata[cpu]->mode))
		etm_disable_hw(etmdrvdata[cpu]);
	spin_unlock(&etmdrvdata[cpu]->spinlock);
	return 0;
}

static bool etm_arch_supported(u8 arch)
{
	switch (arch) {
	case ETM_ARCH_V3_3:
	case ETM_ARCH_V3_5:
	case PFT_ARCH_V1_0:
	case PFT_ARCH_V1_1:
		return true;
	default:
		return false;
	}
}
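
/*
 * etm_init_arch_data() below runs on the CPU that owns the tracer (via
 * smp_call_function_single() from etm_probe()): it briefly powers the
 * block up, reads ETMIDR/ETMCCR/ETMCCER to discover the ETM/PTM version
 * and the number of comparators, counters and external I/O, then powers
 * the block back down until it is first enabled.
 */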

static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear power down bit since when this bit is set writes to
	 * certain registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set prog bit. It will be set from reset but this is included
	 * to ensure it is set.
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}

static void etm_init_trace_id(struct etm_drvdata *drvdata)
{
	drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
}

static int __init etm_hp_setup(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
						   "arm/coresight:starting",
						   etm_starting_cpu, etm_dying_cpu);
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "arm/coresight:online",
						   etm_online_cpu, NULL);

	/* HP dyn state ID returned in ret on success */
	if (ret > 0) {
		hp_online = ret;
		return 0;
	}

	/* failed dyn state - remove others */
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);

	return ret;
}

static void etm_hp_clear(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
	if (hp_online) {
		cpuhp_remove_state_nocalls(hp_online);
		hp_online = 0;
	}
}
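
/*
 * For context, a minimal, hypothetical device tree node for one of these
 * tracers (address, phandles and clocks are illustrative only; the
 * optional "arm,cp14" property is what drvdata->use_cp14 keys off in
 * etm_probe() below):
 *
 *	etm@2201c000 {
 *		compatible = "arm,coresight-etm3x", "arm,primecell";
 *		reg = <0x2201c000 0x1000>;
 *		cpu = <&cpu0>;
 *		clocks = <&oscclk6a>;
 *		clock-names = "apb_pclk";
 *		arm,cp14;
 *	};
 */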

static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->use_cp14 = fwnode_property_read_bool(dev->fwnode, "arm,cp14");
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = coresight_get_cpu(dev);
	if (drvdata->cpu < 0)
		return drvdata->cpu;

	desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
	if (!desc.name)
		return -ENOMEM;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm_arch_supported(drvdata->arch))
		return -EINVAL;

	etm_init_trace_id(drvdata);
	etm_set_default(&drvdata->config);

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	adev->dev.platform_data = pdata;

	desc.type = CORESIGHT_DEV_TYPE_SOURCE;
	desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc.ops = &etm_cs_ops;
	desc.pdata = pdata;
	desc.dev = dev;
	desc.groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	ret = etm_perf_symlink(drvdata->csdev, true);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		return ret;
	}

	etmdrvdata[drvdata->cpu] = drvdata;

	pm_runtime_put(&adev->dev);
	dev_info(&drvdata->csdev->dev,
		 "%s initialized\n", (char *)coresight_get_uci_data(id));
	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;
}

static void clear_etmdrvdata(void *info)
{
	int cpu = *(int *)info;

	etmdrvdata[cpu] = NULL;
}

static int etm_remove(struct amba_device *adev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	etm_perf_symlink(drvdata->csdev, false);

	/*
	 * Taking hotplug lock here to avoid racing between etm_remove and
	 * CPU hotplug call backs.
	 */
	cpus_read_lock();
	/*
	 * The readers of etmdrvdata[] are CPU hotplug call backs and PM
	 * notification call backs. Changing etmdrvdata[i] on CPU i ensures
	 * these call backs have a consistent view within one call back
	 * function.
	 */
	if (smp_call_function_single(drvdata->cpu, clear_etmdrvdata, &drvdata->cpu, 1))
		etmdrvdata[drvdata->cpu] = NULL;

	cpus_read_unlock();

	coresight_unregister(drvdata->csdev);

	return 0;
}

#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etm_runtime_resume(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};

static const struct amba_id etm_ids[] = {
	/* ETM 3.3 */
	CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
	/* ETM 3.5 - Cortex-A5 */
	CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
	/* ETM 3.5 */
	CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
	/* PTM 1.0 */
	CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
	/* PTM 1.1 */
	CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
	/* PTM 1.1 Qualcomm */
	CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, etm_ids);

static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.owner	= THIS_MODULE,
		.pm	= &etm_dev_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe		= etm_probe,
	.remove		= etm_remove,
	.id_table	= etm_ids,
};
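
/*
 * Usage note (illustrative): with the driver built in, tracing can be
 * requested at boot through the boot_enable parameter, e.g.
 * "coresight-etm3x.boot_enable=1" on the kernel command line; otherwise
 * a tracer is normally driven through sysfs, e.g.
 * "echo 1 > /sys/bus/coresight/devices/etm0/enable_source", or through
 * a perf session.
 */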

static int __init etm_init(void)
{
	int ret;

	ret = etm_hp_setup();

	/* etm_hp_setup() does its own cleanup - exit on error */
	if (ret)
		return ret;

	ret = amba_driver_register(&etm_driver);
	if (ret) {
		pr_err("Error registering etm3x driver\n");
		etm_hp_clear();
	}

	return ret;
}

static void __exit etm_exit(void)
{
	amba_driver_unregister(&etm_driver);
	etm_hp_clear();
}

module_init(etm_init);
module_exit(etm_exit);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
MODULE_DESCRIPTION("Arm CoreSight Program Flow Trace driver");
MODULE_LICENSE("GPL v2");