// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

/*
 * An ETM context for a running event includes the perf aux handle
 * and aux_data. For ETM, the aux_data (etm_event_data) consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler. And if the "sink" encounters a failure
 * to "begin" another session (e.g. due to lack of space in the buffer),
 * the handle will be cleared. Thus, the event_data may not be accessible
 * from the handle when we get to etm_event_stop(), which is required
 * for stopping the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active on
 * the ETM. Thus the event_data for the session must be part of the ETM context
 * to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
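
/*
 * A sketch of how the "sinkid" format is meant to be used (assuming a
 * sink named tmc_etr0 exists on the system): perf resolves the sink
 * name to the hash exposed under this PMU's "sinks" group and passes
 * it in config2, e.g.
 *
 *	perf record -e cs_etm/@tmc_etr0/u -- <command>
 *
 * etm_setup_aux() then maps the hash back to a device with
 * coresight_get_sink_by_id(). See etm_perf_add_symlink_sink() for
 * where the hash is generated.
 */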

/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};
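
/*
 * The "sinks" group below starts out empty; an entry is added for each
 * sink that registers, via etm_perf_add_symlink_sink(), and removed
 * again by etm_perf_del_symlink_sink(). Each entry reports the hash
 * that user space passes back in config2 to select that sink, e.g.
 * (hypothetical sink name):
 *
 *	# cat /sys/bus/event_source/devices/cs_etm/sinks/tmc_etr0
 *	0x...
 */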

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}
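
/*
 * The actual clean-up is deferred to process context (free_event_data()
 * above): releasing the per-CPU paths may sleep, which is presumably not
 * safe in every context free_aux() can be invoked from.
 */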

static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

/*
 * Check if two given sinks are compatible with each other,
 * so that they can use the same sink buffers, when an event
 * moves around.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;
	/*
	 * If the sinks are of the same subtype and driven
	 * by the same driver, we can use the same buffer
	 * on these sinks.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	mask = &event_data->mask;

	/*
	 * Set up the path for each CPU in a trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU, clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for all the ETMs,
		 * where this event can be scheduled.
		 * We allocate the sink specific buffers only once for this
		 * event. If the ETMs have different default sink devices, we
		 * can only use a single "type" of sink as the event can carry
		 * only one sink specific buffer. Thus we have to make sure
		 * that the sinks are of the same type and driven by the same
		 * driver, as the one we allocate the buffer for. As such
		 * we choose the first sink and check if the remaining ETMs
		 * have a compatible default sink. We don't trace on a CPU
		 * if the sink is not compatible.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check if this sink is compatible with the last sink */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it; it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are ensured to be of the
	 * same type. Thus the same sink configuration is used by the
	 * sinks.
	 */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}
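
/*
 * etm_event_start() runs on the CPU being traced (note the use of
 * smp_processor_id() and this_cpu_ptr()), either via etm_event_add()
 * or when the perf core restarts a stopped event.
 */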

static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto fail_end_stop;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Tell the perf core the event is alive */
	event->hw.state = 0;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
out:
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
fail:
	event->hw.state = PERF_HES_STOPPED;
	goto out;
}
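
/*
 * Counterpart of etm_event_start(): disable the tracer, have the sink
 * collect what was gathered when the perf core asks for an update
 * (PERF_EF_UPDATE), then disable the path so its components can be
 * reused by other sessions.
 */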

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is not bound to an event anymore
	 * (e.g. the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		perf_aux_output_end(handle, size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}
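
/*
 * Address filters come from the perf core (perf record --filter). A
 * sketch of the forms this driver accepts, assuming a binary /bin/ls
 * with code of interest at offset 0x72c:
 *
 *	--filter 'filter 0x72c/0x40@/bin/ls'			range filter
 *	--filter 'start 0x72c@/bin/ls,stop 0x76c@/bin/ls'	start/stop pair
 *
 * A filter with size == 0 denotes a single-address (start/stop) trigger;
 * ranges and start/stop filters cannot be mixed in one event.
 */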

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size == 0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to coexist; they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	int ret;
	unsigned long hash;
	const char *name;
	struct device *pmu_dev = etm_pmu.dev;
	struct device *dev = &csdev->dev;
	struct dev_ext_attribute *ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return -ENOMEM;

	name = dev_name(dev);
	/* See coresight_get_sink_by_id() for where this hash is used */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return -ENOMEM;

	ea->attr.attr.mode = 0444;
	ea->attr.show = etm_perf_sink_name_show;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, "sinks");

	if (!ret)
		csdev->ea = ea;

	return ret;
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	struct device *pmu_dev = etm_pmu.dev;
	struct dev_ext_attribute *ea = csdev->ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!ea)
		return;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, "sinks");
	csdev->ea = NULL;
}

int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities		= (PERF_PMU_CAP_EXCLUSIVE |
					   PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups		= etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr		= perf_sw_context;
	etm_pmu.read			= etm_event_read;
	etm_pmu.event_init		= etm_event_init;
	etm_pmu.setup_aux		= etm_setup_aux;
	etm_pmu.free_aux		= etm_free_aux;
	etm_pmu.start			= etm_event_start;
	etm_pmu.stop			= etm_event_stop;
	etm_pmu.add			= etm_event_add;
	etm_pmu.del			= etm_event_del;
	etm_pmu.addr_filters_sync	= etm_addr_filters_sync;
	etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
	etm_pmu.nr_addr_filters		= ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void __exit etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}