/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister.
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-40");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};

/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};

/* device_str_attr_create : Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}
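
/*
 * Each IMC PMU's event list comes from the device tree: the PMU node carries
 * an "events-prefix" string, optional global "scale"/"unit" strings, a "reg"
 * base offset and an "events" phandle; every child of the node that phandle
 * points to describes one event with a "reg" offset, an "event-name" and
 * optional per-event "scale"/"unit" overrides. A purely illustrative child
 * node (names and values are examples, not taken from a real device tree):
 *
 *	event@40 {
 *		event-name = "64B_RD_OR_WR_DISP_PORT01";
 *		reg = <0x40>;
 *	};
 *
 * imc_parse_event() turns one such child into a struct imc_events, and
 * update_events_in_group() exports the result as the sysfs "events" group.
 */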
static int imc_parse_event(struct device_node *np, const char *scale,
			   const char *unit, const char *prefix,
			   u32 base, struct imc_events *event)
{
	const char *s;
	u32 reg;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;
	/* Add the base_reg value to the "reg" */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return 0;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	return -EINVAL;
}

/*
 * imc_free_events: Function to cleanup the events list, having
 *                  "nr_entries" entries.
 */
static void imc_free_events(struct imc_events *events, int nr_entries)
{
	int i;

	/* Nothing to clean, return */
	if (!events)
		return;
	for (i = 0; i < nr_entries; i++) {
		kfree(events[i].unit);
		kfree(events[i].scale);
		kfree(events[i].name);
	}

	kfree(events);
}

/*
 * update_events_in_group: Update the "events" information in an attr_group
 *                         and assign the attr_group to the pmu "pmu".
 */
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	u32 handle, base_reg;
	int i = 0, j = 0, ct, ret;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with the given phandle */
	if (!pmu_events)
		return 0;

	/* Get the number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix))
		return 0;

	/* Get the global unit and scale data, if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* The "reg" property gives the base offset of the counter data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events)
		return -ENOMEM;

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
		if (!ret)
			ct++;
	}

	/* Allocate memory for the attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group) {
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for the attributes.
	 * Since we have a count of events for this pmu, we also allocate
	 * memory for the scale and unit attributes now.
	 * "ct" is the total number of event structs parsed from the
	 * events-parent node, so allocate three times "ct" entries
	 * (event, event.scale and event.unit) plus one for the
	 * terminating NULL.
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i].scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i].unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute group */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	return 0;
}

/* get_nest_pmu_ref: Return the nest imc_pmu_ref struct for the given cpu's node */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	while (*pn) {
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
		pn++;
	}
}
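
/*
 * Nest counters count chip-wide traffic, so only one CPU per chip (node)
 * is kept in nest_imc_cpumask and services the nest events for that chip.
 * The hotplug callbacks below maintain that designation: when the designated
 * CPU goes offline, the perf context is migrated to another online CPU on
 * the same chip, or the nest engine is stopped if none is left.
 */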
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if it is not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Check whether nest_imc is registered. We could end up here if the
	 * cpuhotplug callback registration fails. i.e, callback invokes the
	 * offline path for all successfully registered nodes. At this stage,
	 * nest_imc pmu will not be registered and we should return here.
	 *
	 * We return with a zero since this is not an offline failure. And
	 * cpuhp_setup_state() returns the actual failure reason to the caller,
	 * which in turn will call the cleanup routine.
	 */
	if (!nest_pmus)
		return 0;

	/*
	 * Now that this cpu is one of the designated,
	 * find the next cpu a) which is online and b) in the same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed.
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip then, skip the
		 * reference count lock and make the reference count on
		 * this chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node,
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}

static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * Take the lock to ensure that we don't race with another task
	 * enabling or disabling the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the lock for this node and then decrement the reference count */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when a perf session is
		 * started, followed by offlining of all cpus in a given node.
		 *
		 * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given node, and makes an OPAL
		 * call to disable the engine in that node.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	/* Unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserved memory
	 * region (HOMER). Get the base memory address for this cpu.
	 */
	chip_id = cpu_to_chip_id(event->cpu);
	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni);

	if (!flag)
		return -ENODEV;

	/* Add the event offset to the base address */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the lock and then increment the count of nest pmu events
	 * initialized.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}
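
/*
 * Core IMC counters are accumulated in a per-core memory region. The region
 * is allocated on the core's home node the first time a CPU of that core
 * comes online, and its physical address is handed to OPAL (PDBAR setup)
 * via opal_imc_counters_init(). As with nest, a single designated CPU per
 * core is tracked in core_imc_cpumask for event accounting.
 */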
/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() and passes the returned address to an OPAL call
 * that configures the PDBAR. The address is converted to a physical address
 * before the OPAL call is made. This is the base address at which the core
 * imc counters are populated.
 */
static int core_imc_mem_init(int cpu, int size)
{
	int nid, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;

	/*
	 * alloc_pages_node() will allocate memory for the core in the
	 * local node only.
	 */
	nid = cpu_to_node(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	mem_info->vbase = page_address(alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				  __GFP_NOWARN, get_order(size)));
	if (!mem_info->vbase)
		return -ENOMEM;

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	/* If the core imc memory is not yet initialized, do it now */
	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* Set the cpu in the mask */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int core_id;
	int ncpu;
	struct imc_pmu_ref *ref;

	/*
	 * Clear this cpu out of the mask. If it is not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/*
	 * Check whether core_imc is registered. We could end up here
	 * if the cpuhotplug callback registration fails. i.e, callback
	 * invokes the offline path for all successfully registered cpus.
	 * At this stage, core_imc pmu will not be registered and we
	 * should return here.
	 *
	 * We return with a zero since this is not an offline failure.
	 * And cpuhp_setup_state() returns the actual failure reason
	 * to the caller, which in turn will call the cleanup routine.
	 */
	if (!core_imc_pmu->pmu.event_init)
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core then, skip taking the
		 * reference count lock for this core and directly zero
		 * "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}

static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;
	/*
	 * See if we need to disable the IMC PMU.
	 * Take the lock to ensure that we don't race with another task
	 * enabling or disabling the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * The scenario where this is true is, when a perf session is
		 * started, followed by offlining of all cpus in a given core.
		 *
		 * In the cpuhotplug offline path, ppc_core_imc_cpu_offline()
		 * sets ref->refc to zero, if the cpu which is about to go
		 * offline is the last cpu in a given core, and makes an OPAL
		 * call to disable the engine in that core.
		 */
		mutex_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	/* Unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	/* Get the core_imc reference count struct for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the lock and enable the core counters.
	 * If not, just increment the count in the core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}
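
/*
 * Thread IMC counters are accumulated in a per-cpu buffer whose (masked)
 * base address is programmed into the LDBAR SPR of each hardware thread.
 * Note that thread-imc has no engine of its own: thread_imc_event_add() and
 * thread_imc_event_del() below start and stop the *core* IMC engine through
 * OPAL_IMC_COUNTERS_CORE and share core_imc_refc with the core-imc PMU.
 */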
/*
 * Allocates a page of memory for each of the online cpus, and writes the
 * physical base address of that page to the LDBAR for that cpu.
 *
 * LDBAR Register Layout:
 *
 *  0          4         8         12        16        20        24        28
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |       [   ]    [  Counter Address [8:50]
 *   | * Mode    |
 *   |           * PB Scope
 *   * Enable/Disable
 *
 *  32        36        40        44        48        52        56        60
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *            Counter Address [8:50]        ]
 *
 */
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int nid = cpu_to_node(cpu_id);

	if (!local_mem) {
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in the cpu offline path.
		 */
		local_mem = page_address(alloc_pages_node(nid,
				  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
				  __GFP_NOWARN, get_order(size)));
		if (!local_mem)
			return -ENOMEM;

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;

	mtspr(SPRN_LDBAR, ldbar_value);
	return 0;
}

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}

static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	event->pmu->task_ctx_nr = perf_sw_context;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static u64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (u64 *)event->hw.event_base;
}
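
/*
 * Minimal perf transaction hooks: only PERF_PMU_TXN_ADD transactions (group
 * scheduling) are of interest here, and all they need is for the PMU to be
 * disabled for the duration of the add and re-enabled on commit/cancel.
 */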
static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static u64 imc_read_counter(struct perf_event *event)
{
	u64 *addr, data;

	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at a later stage to present the event
	 * counter value.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}

static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter offset in memory. To get an event
	 * counter value, we snapshot the value here and calculate the
	 * delta at a later point.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;

	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return -EINVAL;

	core_id = smp_processor_id() / threads_per_core;
	/*
	 * imc pmus are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the lock and enable the counters.
	 * If not, just increment the count in the ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter for core %d\n",
			       core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);
	return 0;
}

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	int core_id;
	struct imc_pmu_ref *ref;

	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);

	core_id = smp_processor_id() / threads_per_core;
	ref = &core_imc_refc[core_id];

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters for core %d\n",
			       core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	default:
		break;
	}

	return 0;
}

/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);

	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" is
		 * initialized to 0 and used as the array index. "i" will not
		 * go off the end of the array since "for_each_node" iterates
		 * over N_POSSIBLE nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By zeroing LDBAR, we disable thread-imc
	 * updates.
	 */
	mtspr(SPRN_LDBAR, 0);
}

void thread_imc_disable(void)
{
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

/* Function to free the attr_groups which are dynamically allocated */
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
}

/*
 * Common function to unregister the cpu hotplug callback and
 * free the memory.
 * TODO: Need to handle pmu unregistering, which will be
 * done in a follow-up series.
 */
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
			kfree(per_nest_pmu_arr);
			per_nest_pmu_arr = NULL;
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}
}

/*
 * Function to unregister thread-imc if core-imc
 * is not registered.
 */
void unregister_thread_imc(void)
{
	imc_common_cpuhp_mem_free(thread_imc_pmu);
	imc_common_mem_free(thread_imc_pmu);
	perf_pmu_unregister(&thread_imc_pmu->pmu);
}
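
/*
 * The entry points below (imc_mem_init() and init_imc_pmu(), together with
 * thread_imc_disable() and unregister_thread_imc() above) are driven by the
 * powernv opal-imc platform driver (arch/powerpc/platforms/powernv/opal-imc.c),
 * which walks the IMC device-tree nodes, allocates a struct imc_pmu with its
 * domain and counter_mem_size filled in, and calls init_imc_pmu() for each
 * device it finds.
 */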
/*
 * imc_mem_init : Function to support memory allocation for the given pmu.
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
			int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res = -ENOMEM;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			goto err;

		/* Needed for hotplug/migration */
		if (!per_nest_pmu_arr) {
			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
						   sizeof(struct imc_pmu *),
						   GFP_KERNEL);
			if (!per_nest_pmu_arr)
				goto err;
		}
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);

		if (!pmu_ptr->mem_info)
			goto err;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					GFP_KERNEL);

		if (!core_imc_refc) {
			kfree(pmu_ptr->mem_info);
			goto err;
		}

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			goto err;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res) {
				cleanup_all_thread_imc_memory();
				goto err;
			}
		}

		thread_imc_pmu = pmu_ptr;
		break;
	default:
		return -EINVAL;
	}

	return 0;
err:
	return res;
}
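
/*
 * Once perf_pmu_register() in init_imc_pmu() succeeds, the format, events
 * and (for nest/core) cpumask attribute groups built above show up under
 * /sys/bus/event_source/devices/<pmu-name>/, and the counters can be used
 * through the usual perf interfaces. A hypothetical example (the PMU and
 * event names depend on the platform's device tree):
 *
 *	perf stat -e 'nest_mcs01_imc/PM_MCS01_64B_RD_DISP_PORT01/' -a sleep 1
 */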
/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:	Device tree unit node
 * @pmu_ptr:	memory allocated for this pmu
 * @pmu_idx:	Count of nest pmus registered
 *
 * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug callback.
 * It handles failure cases and frees memory accordingly.
 */
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free_mem;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * Nest imc pmus need only one cpu per chip. We initialize
		 * the cpumask for the first nest imc pmu and use the same
		 * for the rest. To handle the cpuhotplug callback unregister,
		 * we track the number of nest pmus in "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				kfree(nest_imc_refc);
				kfree(per_nest_pmu_arr);
				per_nest_pmu_arr = NULL;
				goto err_free_mem;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			goto err_free_mem;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			goto err_free_mem;
		}

		break;
	default:
		return -EINVAL;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free_cpuhp_mem;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free_cpuhp_mem;

	pr_info("%s performance monitor hardware support registered\n",
		pmu_ptr->pmu.name);

	return 0;

err_free_cpuhp_mem:
	imc_common_cpuhp_mem_free(pmu_ptr);
err_free_mem:
	imc_common_mem_free(pmu_ptr);
	return ret;
}