/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 *
 * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation.
 *           (C) 2017 Anju T Sudhakar, IBM Corporation.
 *           (C) 2017 Hemant K Shaw, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
#include <linux/string.h>

/* Nest IMC data structures and variables */

/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister
 */
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
static cpumask_t nest_imc_cpumask;
struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;

/* Core IMC data structures and variables */

static cpumask_t core_imc_cpumask;
struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;

/* Thread IMC data structures and variables */

static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;

struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct imc_pmu, pmu);
}

PMU_FORMAT_ATTR(event, "config:0-40");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
static struct attribute *imc_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_offset.attr,
	&format_attr_rvalue.attr,
	&format_attr_mode.attr,
	NULL,
};

static struct attribute_group imc_format_group = {
	.name = "format",
	.attrs = imc_format_attrs,
};

/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
	cpumask_t *active_mask;

	switch (imc_pmu->domain) {
	case IMC_DOMAIN_NEST:
		active_mask = &nest_imc_cpumask;
		break;
	case IMC_DOMAIN_CORE:
		active_mask = &core_imc_cpumask;
		break;
	default:
		return 0;
	}

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, imc_pmu_cpumask_get_attr, NULL);

static struct attribute *imc_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group imc_pmu_cpumask_attr_group = {
	.attrs = imc_pmu_cpumask_attrs,
};

/* device_str_attr_create : Populate event "name" and string "str" in attribute */
static struct attribute *device_str_attr_create(const char *name, const char *str)
{
	struct perf_pmu_events_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return NULL;
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
}

struct imc_events *imc_parse_event(struct device_node *np, const char *scale,
				   const char *unit,
				   const char *prefix, u32 base)
{
	struct imc_events *event;
	const char *s;
	u32 reg;

	event = kzalloc(sizeof(struct imc_events), GFP_KERNEL);
	if (!event)
		return NULL;

	if (of_property_read_u32(np, "reg", &reg))
		goto error;
	/* Add the base_reg value to the "reg" */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;

	if (of_property_read_string(np, "scale", &s))
		s = scale;

	if (s) {
		event->scale = kstrdup(s, GFP_KERNEL);
		if (!event->scale)
			goto error;
	}

	if (of_property_read_string(np, "unit", &s))
		s = unit;

	if (s) {
		event->unit = kstrdup(s, GFP_KERNEL);
		if (!event->unit)
			goto error;
	}

	return event;
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	kfree(event);

	return NULL;
}

/*
 * update_events_in_group: Update the "events" information in an attr_group
 *                         and assign the attr_group to the pmu "pmu".
 */
static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
{
	struct attribute_group *attr_group;
	struct attribute **attrs, *dev_str;
	struct device_node *np, *pmu_events;
	struct imc_events *ev;
	u32 handle, base_reg;
	int i = 0, j = 0, ct;
	const char *prefix, *g_scale, *g_unit;
	const char *ev_val_str, *ev_scale_str, *ev_unit_str;

	if (!of_property_read_u32(node, "events", &handle))
		pmu_events = of_find_node_by_phandle(handle);
	else
		return 0;

	/* Did not find any node with a given phandle */
	if (!pmu_events)
		return 0;

	/* Get a count of number of child nodes */
	ct = of_get_child_count(pmu_events);

	/* Get the event prefix */
	if (of_property_read_string(node, "events-prefix", &prefix))
		return 0;

	/* Get a global unit and scale data if available */
	if (of_property_read_string(node, "scale", &g_scale))
		g_scale = NULL;

	if (of_property_read_string(node, "unit", &g_unit))
		g_unit = NULL;

	/* "reg" property gives out the base offset of the counters data */
	of_property_read_u32(node, "reg", &base_reg);

	/* Allocate memory for the events */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events)
		return -ENOMEM;

	ct = 0;
	/* Parse the events and update the struct */
	for_each_child_of_node(pmu_events, np) {
		ev = imc_parse_event(np, g_scale, g_unit, prefix, base_reg);
		if (ev)
			pmu->events[ct++] = ev;
	}

	/* Allocate memory for attribute group */
	attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
	if (!attr_group)
		return -ENOMEM;

	/*
	 * Allocate memory for attributes.
	 * Since we have a count of events for this pmu, we also allocate
	 * memory for the scale and unit attributes for now.
	 * "ct" has the total event structs added from the events-parent node.
	 * So allocate three times "ct" (this includes event, event_scale and
	 * event_unit).
	 */
	attrs = kcalloc(((ct * 3) + 1), sizeof(struct attribute *), GFP_KERNEL);
	if (!attrs) {
		kfree(attr_group);
		kfree(pmu->events);
		return -ENOMEM;
	}

	attr_group->name = "events";
	attr_group->attrs = attrs;
	do {
		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i]->value);
		dev_str = device_str_attr_create(pmu->events[i]->name, ev_val_str);
		if (!dev_str)
			continue;

		attrs[j++] = dev_str;
		if (pmu->events[i]->scale) {
			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i]->name);
			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i]->scale);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}

		if (pmu->events[i]->unit) {
			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i]->name);
			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i]->unit);
			if (!dev_str)
				continue;

			attrs[j++] = dev_str;
		}
	} while (++i < ct);

	/* Save the event attribute */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;

	kfree(pmu->events);
	return 0;
}

/* get_nest_pmu_ref: Return the imc_pmu_ref struct for the given node */
static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
{
	return per_cpu(local_nest_imc_refc, cpu);
}

static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
	struct imc_pmu **pn = per_nest_pmu_arr;
	int i;

	if (old_cpu < 0 || new_cpu < 0)
		return;

	for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
}

static int ppc_nest_imc_cpu_offline(unsigned int cpu)
{
	int nid, target = -1;
	const struct cpumask *l_cpumask;
	struct imc_pmu_ref *ref;

	/*
	 * Check in the designated list for this cpu. Don't bother
	 * if it is not one of them.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
		return 0;

	/*
	 * Now that this cpu is one of the designated ones,
	 * find a next cpu a) which is online and b) in the same chip.
	 */
	nid = cpu_to_node(cpu);
	l_cpumask = cpumask_of_node(nid);
	target = cpumask_any_but(l_cpumask, cpu);

	/*
	 * Update the cpumask with the target cpu and
	 * migrate the context if needed
	 */
	if (target >= 0 && target < nr_cpu_ids) {
		cpumask_set_cpu(target, &nest_imc_cpumask);
		nest_change_cpu_context(cpu, target);
	} else {
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
		/*
		 * If this is the last cpu in this chip then, skip the
		 * reference count mutex lock and make the reference
		 * count on this chip zero.
		 */
		ref = get_nest_pmu_ref(cpu);
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int ppc_nest_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int res;

	/* Get the cpumask of this node */
	l_cpumask = cpumask_of_node(cpu_to_node(cpu));

	/*
	 * If this is not the first online CPU on this node, then
	 * just return.
	 */
	if (cpumask_and(&tmp_mask, l_cpumask, &nest_imc_cpumask))
		return 0;

	/*
	 * If this is the first online cpu on this node,
	 * disable the nest counters by making an OPAL call.
	 */
	res = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				     get_hard_smp_processor_id(cpu));
	if (res)
		return res;

	/* Make this CPU the designated target for counter collection */
	cpumask_set_cpu(cpu, &nest_imc_cpumask);
	return 0;
}

static int nest_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
				 "perf/powerpc/imc:online",
				 ppc_nest_imc_cpu_online,
				 ppc_nest_imc_cpu_offline);
}

static void nest_imc_counters_release(struct perf_event *event)
{
	int rc, node_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);

	/*
	 * See if we need to disable the nest PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable of the nest counters.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return;

	/* Take the mutex lock for this node and then decrement the reference count */
	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for node %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int nest_imc_event_init(struct perf_event *event)
{
	int chip_id, rc, node_id;
	u32 l_config, config = event->attr.config;
	struct imc_mem_info *pcni;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;
	bool flag = false;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserved memory (HOMER).
	 * Get the base memory address for this cpu.
	 */
	chip_id = topology_physical_package_id(event->cpu);
	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			flag = true;
			break;
		}
		pcni++;
	} while (pcni);

	if (!flag)
		return -ENODEV;

	/*
	 * Add the event offset to the base address.
	 */
	l_config = config & IMC_EVENT_OFFSET_MASK;
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);

	/*
	 * Get the imc_pmu_ref struct for this node.
	 * Take the mutex lock and then increment the count of nest pmu events
	 * initialized.
	 */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
	return 0;
}

/*
 * core_imc_mem_init : Initializes memory for the current core.
 *
 * Uses alloc_pages_node() and uses the returned address as an argument to
 * an opal call to configure the pdbar. The address sent as an argument is
 * converted to a physical address before the opal call is made. This is the
 * base address at which the core imc counters are populated.
 */
static int core_imc_mem_init(int cpu, int size)
{
	int phys_id, rc = 0, core_id = (cpu / threads_per_core);
	struct imc_mem_info *mem_info;

	/*
	 * alloc_pages_node() will allocate memory for the core in the
	 * local node only.
	 */
	phys_id = topology_physical_package_id(cpu);
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;

	/* We need only vbase for core counters */
	mem_info->vbase = page_address(alloc_pages_node(phys_id,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				get_order(size)));
	if (!mem_info->vbase)
		return -ENOMEM;

	/* Init the mutex */
	core_imc_refc[core_id].id = core_id;
	mutex_init(&core_imc_refc[core_id].lock);

	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

	return rc;
}

static bool is_core_imc_mem_inited(int cpu)
{
	struct imc_mem_info *mem_info;
	int core_id = (cpu / threads_per_core);

	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

	return true;
}

static int ppc_core_imc_cpu_online(unsigned int cpu)
{
	const struct cpumask *l_cpumask;
	static struct cpumask tmp_mask;
	int ret = 0;

	/* Get the cpumask for this core */
	l_cpumask = cpu_sibling_mask(cpu);

	/* If a cpu for this core is already set, then don't do anything */
	if (cpumask_and(&tmp_mask, l_cpumask, &core_imc_cpumask))
		return 0;

	if (!is_core_imc_mem_inited(cpu)) {
		ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
		if (ret) {
			pr_info("core_imc memory allocation for cpu %d failed\n", cpu);
			return ret;
		}
	}

	/* Set the cpu in the mask */
	cpumask_set_cpu(cpu, &core_imc_cpumask);
	return 0;
}

static int ppc_core_imc_cpu_offline(unsigned int cpu)
{
	unsigned int ncpu, core_id;
	struct imc_pmu_ref *ref;

	/*
	 * Clear this cpu out of the mask. If it is not present in the mask,
	 * don't bother doing anything.
	 */
	if (!cpumask_test_and_clear_cpu(cpu, &core_imc_cpumask))
		return 0;

	/* Find any online cpu in that core except the current "cpu" */
	ncpu = cpumask_any_but(cpu_sibling_mask(cpu), cpu);

	if (ncpu >= 0 && ncpu < nr_cpu_ids) {
		cpumask_set_cpu(ncpu, &core_imc_cpumask);
		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	} else {
		/*
		 * If this is the last cpu in this core then, skip taking the
		 * reference count mutex lock for this core and directly zero
		 * "refc" for this core.
		 */
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
				       get_hard_smp_processor_id(cpu));
		core_id = cpu / threads_per_core;
		ref = &core_imc_refc[core_id];
		if (!ref)
			return -EINVAL;

		ref->refc = 0;
	}
	return 0;
}

static int core_imc_pmu_cpumask_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
				 "perf/powerpc/imc_core:online",
				 ppc_core_imc_cpu_online,
				 ppc_core_imc_cpu_offline);
}

static void core_imc_counters_release(struct perf_event *event)
{
	int rc, core_id;
	struct imc_pmu_ref *ref;

	if (event->cpu < 0)
		return;
	/*
	 * See if we need to disable the IMC PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * enable or disable of the core counters.
	 */
	core_id = event->cpu / threads_per_core;

	/* Take the mutex lock and decrement the reference count for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);
}

static int core_imc_event_init(struct perf_event *event)
{
	int core_id, rc;
	u64 config = event->attr.config;
	struct imc_mem_info *pcmi;
	struct imc_pmu *pmu;
	struct imc_pmu_ref *ref;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv ||
	    event->attr.exclude_idle ||
	    event->attr.exclude_host ||
	    event->attr.exclude_guest)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config (event offset) */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	/* Get the core_imc mutex for this core */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	/*
	 * Core pmu units are enabled only when they are used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the core counters.
	 * If not, just increment the count in the core_imc_refc struct.
	 */
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			mutex_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			return rc;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
	return 0;
}

/*
 * Allocates a page of memory for each of the online cpus, and writes the
 * physical base address of that page to the LDBAR for that cpu.
 *
 * LDBAR Register Layout:
 *
 *  0          4         8         12        16        20        24        28
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |       [   ]    [  Counter Address [8:50]
 *   | * Mode    |
 *   |           * PB Scope
 *   * Enable/Disable
 *
 *  32        36        40        44        48        52        56        60
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *            Counter Address [8:50]       ]
 *
 */
static int thread_imc_mem_alloc(int cpu_id, int size)
{
	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
	int phys_id = topology_physical_package_id(cpu_id);

	if (!local_mem) {
		/*
		 * This case could happen only once at start, since we don't
		 * free the memory in the cpu offline path.
		 */
		local_mem = page_address(alloc_pages_node(phys_id,
				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
				get_order(size)));
		if (!local_mem)
			return -ENOMEM;

		per_cpu(thread_imc_mem, cpu_id) = local_mem;
	}

	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;

	mtspr(SPRN_LDBAR, ldbar_value);
	return 0;
}

static int ppc_thread_imc_cpu_online(unsigned int cpu)
{
	return thread_imc_mem_alloc(cpu, thread_imc_mem_size);
}

static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	return 0;
}

static int thread_imc_cpu_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
				 "perf/powerpc/imc_thread:online",
				 ppc_thread_imc_cpu_online,
				 ppc_thread_imc_cpu_offline);
}

void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
			       bool sched_in)
{
	int core_id;
	struct imc_pmu_ref *ref;

	if (!is_core_imc_mem_inited(smp_processor_id()))
		return;

	core_id = smp_processor_id() / threads_per_core;
	/*
	 * imc pmus are enabled only when used.
	 * See if this is triggered for the first time.
	 * If yes, take the mutex lock and enable the counters.
	 * If not, just increment the count in the ref count struct.
	 */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return;

	if (sched_in) {
		mutex_lock(&ref->lock);
		if (ref->refc == 0) {
			if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
			    get_hard_smp_processor_id(smp_processor_id()))) {
				mutex_unlock(&ref->lock);
				pr_err("thread-imc: Unable to start the counter for core %d\n",
				       core_id);
				return;
			}
		}
		++ref->refc;
		mutex_unlock(&ref->lock);
	} else {
		mutex_lock(&ref->lock);
		ref->refc--;
		if (ref->refc == 0) {
			if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
			    get_hard_smp_processor_id(smp_processor_id()))) {
				mutex_unlock(&ref->lock);
				pr_err("thread-imc: Unable to stop the counters for core %d\n",
				       core_id);
				return;
			}
		} else if (ref->refc < 0) {
			ref->refc = 0;
		}
		mutex_unlock(&ref->lock);
	}

	return;
}

static int thread_imc_event_init(struct perf_event *event)
{
	u32 config = event->attr.config;
	struct task_struct *target;
	struct imc_pmu *pmu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	pmu = imc_event_to_pmu(event);

	/* Sanity check for config offset */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;

	event->pmu->task_ctx_nr = perf_sw_context;
	return 0;
}

static bool is_thread_imc_pmu(struct perf_event *event)
{
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

	return false;
}

static u64 *get_event_base_addr(struct perf_event *event)
{
	u64 addr;

	if (is_thread_imc_pmu(event)) {
		addr = (u64)per_cpu(thread_imc_mem, smp_processor_id());
		return (u64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	}

	return (u64 *)event->hw.event_base;
}

static void thread_imc_pmu_start_txn(struct pmu *pmu,
				     unsigned int txn_flags)
{
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;
	perf_pmu_disable(pmu);
}

static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}

static int thread_imc_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}

static u64 imc_read_counter(struct perf_event *event)
{
	u64 *addr, data;

	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So we take a snapshot of the counter value on enable and save it
	 * to calculate the delta at a later stage when presenting the event
	 * counter value.
	 */
	addr = get_event_base_addr(event);
	data = be64_to_cpu(READ_ONCE(*addr));
	local64_set(&event->hw.prev_count, data);

	return data;
}

static void imc_event_update(struct perf_event *event)
{
	u64 counter_prev, counter_new, final_count;

	counter_prev = local64_read(&event->hw.prev_count);
	counter_new = imc_read_counter(event);
	final_count = counter_new - counter_prev;

	/* Update the delta to the event count */
	local64_add(final_count, &event->count);
}

static void imc_event_start(struct perf_event *event, int flags)
{
	/*
	 * In Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter offset in memory. To get the event
	 * counter value, we snapshot the value here and calculate the
	 * delta at a later point.
	 */
	imc_read_counter(event);
}

static void imc_event_stop(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
}

static int imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	return 0;
}

static int thread_imc_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		imc_event_start(event, flags);

	/* Enable the sched_task to start the engine */
	perf_sched_cb_inc(event->ctx->pmu);
	return 0;
}

static void thread_imc_event_del(struct perf_event *event, int flags)
{
	/*
	 * Take a snapshot, calculate the delta and update
	 * the event counter values.
	 */
	imc_event_update(event);
	perf_sched_cb_dec(event->ctx->pmu);
}

/* update_pmu_ops : Populate the appropriate operations for "pmu" */
static int update_pmu_ops(struct imc_pmu *pmu)
{
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.sched_task = thread_imc_pmu_sched_task;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	default:
		break;
	}

	return 0;
}

/* init_nest_pmu_ref: Initialize the imc_pmu_ref struct for all the nodes */
static int init_nest_pmu_ref(void)
{
	int nid, i, cpu;

	nest_imc_refc = kcalloc(num_possible_nodes(), sizeof(*nest_imc_refc),
				GFP_KERNEL);

	if (!nest_imc_refc)
		return -ENOMEM;

	i = 0;
	for_each_node(nid) {
		/*
		 * Mutex lock to avoid races while tracking the number of
		 * sessions using the chip's nest pmu units.
		 */
		mutex_init(&nest_imc_refc[i].lock);

		/*
		 * Loop to init the "id" with the node_id. Variable "i" is
		 * initialized to 0 and is used as an index into the array.
		 * "i" will not go off the end of the array since
		 * "for_each_node" loops over the possible nodes only.
		 */
		nest_imc_refc[i++].id = nid;
	}

	/*
	 * Loop to init the per_cpu "local_nest_imc_refc" with the proper
	 * "nest_imc_refc" index. This makes get_nest_pmu_ref() a lot simpler.
	 */
	for_each_possible_cpu(cpu) {
		nid = cpu_to_node(cpu);
		for (i = 0; i < num_possible_nodes(); i++) {
			if (nest_imc_refc[i].id == nid) {
				per_cpu(local_nest_imc_refc, cpu) = &nest_imc_refc[i];
				break;
			}
		}
	}
	return 0;
}

static void cleanup_all_core_imc_memory(void)
{
	int i, nr_cores = num_present_cpus() / threads_per_core;
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

	/* mem_info will never be NULL */
	for (i = 0; i < nr_cores; i++) {
		if (ptr[i].vbase)
			free_pages((u64)ptr[i].vbase, get_order(size));
	}

	kfree(ptr);
	kfree(core_imc_refc);
}

static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By zeroing LDBAR, we disable thread-imc
	 * updates.
	 */
	mtspr(SPRN_LDBAR, 0);
}

void thread_imc_disable(void)
{
	on_each_cpu(thread_imc_ldbar_disable, NULL, 1);
}

static void cleanup_all_thread_imc_memory(void)
{
	int i, order = get_order(thread_imc_mem_size);

	for_each_online_cpu(i) {
		if (per_cpu(thread_imc_mem, i))
			free_pages((u64)per_cpu(thread_imc_mem, i), order);
	}
}

/*
 * Common function to unregister the cpu hotplug callback and
 * free the memory.
 * TODO: Need to handle pmu unregistering, which will be
 * done in a followup series.
 */
static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
{
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 1) {
			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
			kfree(nest_imc_refc);
		}

		if (nest_pmus > 0)
			nest_pmus--;
		mutex_unlock(&nest_init_lock);
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE);
		cleanup_all_core_imc_memory();
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
		cleanup_all_thread_imc_memory();
	}

	/* Only free the attr_groups which are dynamically allocated */
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
	kfree(pmu_ptr);
	return;
}

/*
 * imc_mem_init : Function to support memory allocation for core imc.
 */
static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
			int pmu_index)
{
	const char *s;
	int nr_cores, cpu, res;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		/* Needed for hotplug/migration */
		per_nest_pmu_arr[pmu_index] = pmu_ptr;
		break;
	case IMC_DOMAIN_CORE:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		nr_cores = num_present_cpus() / threads_per_core;
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);

		if (!pmu_ptr->mem_info)
			return -ENOMEM;

		core_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
					GFP_KERNEL);

		if (!core_imc_refc)
			return -ENOMEM;

		core_imc_pmu = pmu_ptr;
		break;
	case IMC_DOMAIN_THREAD:
		/* Update the pmu name */
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			if (res)
				return res;
		}

		thread_imc_pmu = pmu_ptr;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * init_imc_pmu : Setup and register the IMC pmu device.
 *
 * @parent:	Device tree unit node
 * @pmu_ptr:	memory allocated for this pmu
 * @pmu_idx:	Count of nest pmcs registered
 *
 * init_imc_pmu() sets up the pmu cpumask and registers a cpu hotplug callback.
 * Handles failure cases and accordingly frees memory.
 */
int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_idx)
{
	int ret;

	ret = imc_mem_init(pmu_ptr, parent, pmu_idx);
	if (ret)
		goto err_free;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		/*
		 * A nest imc pmu needs only one cpu per chip. We initialize
		 * the cpumask for the first nest imc pmu and use the same
		 * for the rest. To handle the cpuhotplug callback unregister,
		 * we track the number of nest pmus in "nest_pmus".
		 */
		mutex_lock(&nest_init_lock);
		if (nest_pmus == 0) {
			ret = init_nest_pmu_ref();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				goto err_free;
			}
			/* Register for cpu hotplug notification. */
			ret = nest_pmu_cpumask_init();
			if (ret) {
				mutex_unlock(&nest_init_lock);
				goto err_free;
			}
		}
		nest_pmus++;
		mutex_unlock(&nest_init_lock);
		break;
	case IMC_DOMAIN_CORE:
		ret = core_imc_pmu_cpumask_init();
		if (ret) {
			cleanup_all_core_imc_memory();
			return ret;
		}

		break;
	case IMC_DOMAIN_THREAD:
		ret = thread_imc_cpu_init();
		if (ret) {
			cleanup_all_thread_imc_memory();
			return ret;
		}

		break;
	default:
		return -1;	/* Unknown domain */
	}

	ret = update_events_in_group(parent, pmu_ptr);
	if (ret)
		goto err_free;

	ret = update_pmu_ops(pmu_ptr);
	if (ret)
		goto err_free;

	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	if (ret)
		goto err_free;

	pr_info("%s performance monitor hardware support registered\n",
		pmu_ptr->pmu.name);

	return 0;

err_free:
	imc_common_cpuhp_mem_free(pmu_ptr);
	return ret;
}