1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Performance event support for the System z CPU-measurement Sampling Facility 4 * 5 * Copyright IBM Corp. 2013, 2018 6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7 */ 8 #define KMSG_COMPONENT "cpum_sf" 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/kernel_stat.h> 13 #include <linux/perf_event.h> 14 #include <linux/percpu.h> 15 #include <linux/pid.h> 16 #include <linux/notifier.h> 17 #include <linux/export.h> 18 #include <linux/slab.h> 19 #include <linux/mm.h> 20 #include <linux/moduleparam.h> 21 #include <asm/cpu_mf.h> 22 #include <asm/irq.h> 23 #include <asm/debug.h> 24 #include <asm/timex.h> 25 26 /* Minimum number of sample-data-block-tables: 27 * At least one table is required for the sampling buffer structure. 28 * A single table contains up to 511 pointers to sample-data-blocks. 29 */ 30 #define CPUM_SF_MIN_SDBT 1 31 32 /* Number of sample-data-blocks per sample-data-block-table (SDBT): 33 * A table contains SDB pointers (8 bytes) and one table-link entry 34 * that points to the origin of the next SDBT. 35 */ 36 #define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) 37 38 /* Maximum page offset for an SDBT table-link entry: 39 * If this page offset is reached, a table-link entry to the next SDBT 40 * must be added. 41 */ 42 #define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) 43 static inline int require_table_link(const void *sdbt) 44 { 45 return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; 46 } 47 48 /* Minimum and maximum sampling buffer sizes: 49 * 50 * This number represents the maximum size of the sampling buffer taking 51 * the number of sample-data-block-tables into account. Note that these 52 * numbers apply to the basic-sampling function only. 53 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if 54 * the diagnostic-sampling function is active. 55 * 56 * Sampling buffer size Buffer characteristics 57 * --------------------------------------------------- 58 * 64KB == 16 pages (4KB per page) 59 * 1 page for SDB-tables 60 * 15 pages for SDBs 61 * 62 * 32MB == 8192 pages (4KB per page) 63 * 16 pages for SDB-tables 64 * 8176 pages for SDBs 65 */ 66 static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; 67 static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; 68 static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; 69 70 struct sf_buffer { 71 unsigned long *sdbt; /* Sample-data-block-table origin */ 72 /* buffer characteristics (required for buffer increments) */ 73 unsigned long num_sdb; /* Number of sample-data-blocks */ 74 unsigned long num_sdbt; /* Number of sample-data-block-tables */ 75 unsigned long *tail; /* last sample-data-block-table */ 76 }; 77 78 struct aux_buffer { 79 struct sf_buffer sfb; 80 unsigned long head; /* index of SDB of buffer head */ 81 unsigned long alert_mark; /* index of SDB of alert request position */ 82 unsigned long empty_mark; /* mark of SDB not marked full */ 83 unsigned long *sdb_index; /* SDB address for fast lookup */ 84 unsigned long *sdbt_index; /* SDBT address for fast lookup */ 85 }; 86 87 struct cpu_hw_sf { 88 /* CPU-measurement sampling information block */ 89 struct hws_qsi_info_block qsi; 90 /* CPU-measurement sampling control block */ 91 struct hws_lsctl_request_block lsctl; 92 struct sf_buffer sfb; /* Sampling buffer */ 93 unsigned int flags; /* Status flags */ 94 struct perf_event *event; /* Scheduled perf event */ 95 struct perf_output_handle handle; /* AUX buffer output handle */ 96 }; 97 static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); 98 99 /* Debug feature */ 100 static debug_info_t *sfdbg; 101 102 /* 103 * sf_disable() - Switch off sampling facility 104 */ 105 static int sf_disable(void) 106 { 107 struct hws_lsctl_request_block sreq; 108 109 memset(&sreq, 0, sizeof(sreq)); 110 return lsctl(&sreq); 111 } 112 113 /* 114 * sf_buffer_available() - Check for an allocated sampling buffer 115 */ 116 static int sf_buffer_available(struct cpu_hw_sf *cpuhw) 117 { 118 return !!cpuhw->sfb.sdbt; 119 } 120 121 /* 122 * deallocate sampling facility buffer 123 */ 124 static void free_sampling_buffer(struct sf_buffer *sfb) 125 { 126 unsigned long *sdbt, *curr; 127 128 if (!sfb->sdbt) 129 return; 130 131 sdbt = sfb->sdbt; 132 curr = sdbt; 133 134 /* Free the SDBT after all SDBs are processed... */ 135 while (1) { 136 if (!*curr || !sdbt) 137 break; 138 139 /* Process table-link entries */ 140 if (is_link_entry(curr)) { 141 curr = get_next_sdbt(curr); 142 if (sdbt) 143 free_page((unsigned long) sdbt); 144 145 /* If the origin is reached, sampling buffer is freed */ 146 if (curr == sfb->sdbt) 147 break; 148 else 149 sdbt = curr; 150 } else { 151 /* Process SDB pointer */ 152 if (*curr) { 153 free_page(*curr); 154 curr++; 155 } 156 } 157 } 158 159 debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, 160 (unsigned long)sfb->sdbt); 161 memset(sfb, 0, sizeof(*sfb)); 162 } 163 164 static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) 165 { 166 unsigned long sdb, *trailer; 167 168 /* Allocate and initialize sample-data-block */ 169 sdb = get_zeroed_page(gfp_flags); 170 if (!sdb) 171 return -ENOMEM; 172 trailer = trailer_entry_ptr(sdb); 173 *trailer = SDB_TE_ALERT_REQ_MASK; 174 175 /* Link SDB into the sample-data-block-table */ 176 *sdbt = sdb; 177 178 return 0; 179 } 180 181 /* 182 * realloc_sampling_buffer() - extend sampler memory 183 * 184 * Allocates new sample-data-blocks and adds them to the specified sampling 185 * buffer memory. 186 * 187 * Important: This modifies the sampling buffer and must be called when the 188 * sampling facility is disabled. 189 * 190 * Returns zero on success, non-zero otherwise. 191 */ 192 static int realloc_sampling_buffer(struct sf_buffer *sfb, 193 unsigned long num_sdb, gfp_t gfp_flags) 194 { 195 int i, rc; 196 unsigned long *new, *tail, *tail_prev = NULL; 197 198 if (!sfb->sdbt || !sfb->tail) 199 return -EINVAL; 200 201 if (!is_link_entry(sfb->tail)) 202 return -EINVAL; 203 204 /* Append to the existing sampling buffer, overwriting the table-link 205 * register. 206 * The tail variables always points to the "tail" (last and table-link) 207 * entry in an SDB-table. 208 */ 209 tail = sfb->tail; 210 211 /* Do a sanity check whether the table-link entry points to 212 * the sampling buffer origin. 213 */ 214 if (sfb->sdbt != get_next_sdbt(tail)) { 215 debug_sprintf_event(sfdbg, 3, "%s: " 216 "sampling buffer is not linked: origin %#lx" 217 " tail %#lx\n", __func__, 218 (unsigned long)sfb->sdbt, 219 (unsigned long)tail); 220 return -EINVAL; 221 } 222 223 /* Allocate remaining SDBs */ 224 rc = 0; 225 for (i = 0; i < num_sdb; i++) { 226 /* Allocate a new SDB-table if it is full. */ 227 if (require_table_link(tail)) { 228 new = (unsigned long *) get_zeroed_page(gfp_flags); 229 if (!new) { 230 rc = -ENOMEM; 231 break; 232 } 233 sfb->num_sdbt++; 234 /* Link current page to tail of chain */ 235 *tail = (unsigned long)(void *) new + 1; 236 tail_prev = tail; 237 tail = new; 238 } 239 240 /* Allocate a new sample-data-block. 241 * If there is not enough memory, stop the realloc process 242 * and simply use what was allocated. If this is a temporary 243 * issue, a new realloc call (if required) might succeed. 244 */ 245 rc = alloc_sample_data_block(tail, gfp_flags); 246 if (rc) { 247 /* Undo last SDBT. An SDBT with no SDB at its first 248 * entry but with an SDBT entry instead can not be 249 * handled by the interrupt handler code. 250 * Avoid this situation. 251 */ 252 if (tail_prev) { 253 sfb->num_sdbt--; 254 free_page((unsigned long) new); 255 tail = tail_prev; 256 } 257 break; 258 } 259 sfb->num_sdb++; 260 tail++; 261 tail_prev = new = NULL; /* Allocated at least one SBD */ 262 } 263 264 /* Link sampling buffer to its origin */ 265 *tail = (unsigned long) sfb->sdbt + 1; 266 sfb->tail = tail; 267 268 debug_sprintf_event(sfdbg, 4, "%s: new buffer" 269 " settings: sdbt %lu sdb %lu\n", __func__, 270 sfb->num_sdbt, sfb->num_sdb); 271 return rc; 272 } 273 274 /* 275 * allocate_sampling_buffer() - allocate sampler memory 276 * 277 * Allocates and initializes a sampling buffer structure using the 278 * specified number of sample-data-blocks (SDB). For each allocation, 279 * a 4K page is used. The number of sample-data-block-tables (SDBT) 280 * are calculated from SDBs. 281 * Also set the ALERT_REQ mask in each SDBs trailer. 282 * 283 * Returns zero on success, non-zero otherwise. 284 */ 285 static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) 286 { 287 int rc; 288 289 if (sfb->sdbt) 290 return -EINVAL; 291 292 /* Allocate the sample-data-block-table origin */ 293 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 294 if (!sfb->sdbt) 295 return -ENOMEM; 296 sfb->num_sdb = 0; 297 sfb->num_sdbt = 1; 298 299 /* Link the table origin to point to itself to prepare for 300 * realloc_sampling_buffer() invocation. 301 */ 302 sfb->tail = sfb->sdbt; 303 *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; 304 305 /* Allocate requested number of sample-data-blocks */ 306 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); 307 if (rc) { 308 free_sampling_buffer(sfb); 309 debug_sprintf_event(sfdbg, 4, "%s: " 310 "realloc_sampling_buffer failed with rc %i\n", 311 __func__, rc); 312 } else 313 debug_sprintf_event(sfdbg, 4, 314 "%s: tear %#lx dear %#lx\n", __func__, 315 (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); 316 return rc; 317 } 318 319 static void sfb_set_limits(unsigned long min, unsigned long max) 320 { 321 struct hws_qsi_info_block si; 322 323 CPUM_SF_MIN_SDB = min; 324 CPUM_SF_MAX_SDB = max; 325 326 memset(&si, 0, sizeof(si)); 327 if (!qsi(&si)) 328 CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); 329 } 330 331 static unsigned long sfb_max_limit(struct hw_perf_event *hwc) 332 { 333 return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR 334 : CPUM_SF_MAX_SDB; 335 } 336 337 static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, 338 struct hw_perf_event *hwc) 339 { 340 if (!sfb->sdbt) 341 return SFB_ALLOC_REG(hwc); 342 if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) 343 return SFB_ALLOC_REG(hwc) - sfb->num_sdb; 344 return 0; 345 } 346 347 static int sfb_has_pending_allocs(struct sf_buffer *sfb, 348 struct hw_perf_event *hwc) 349 { 350 return sfb_pending_allocs(sfb, hwc) > 0; 351 } 352 353 static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) 354 { 355 /* Limit the number of SDBs to not exceed the maximum */ 356 num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); 357 if (num) 358 SFB_ALLOC_REG(hwc) += num; 359 } 360 361 static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) 362 { 363 SFB_ALLOC_REG(hwc) = 0; 364 sfb_account_allocs(num, hwc); 365 } 366 367 static void deallocate_buffers(struct cpu_hw_sf *cpuhw) 368 { 369 if (cpuhw->sfb.sdbt) 370 free_sampling_buffer(&cpuhw->sfb); 371 } 372 373 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) 374 { 375 unsigned long n_sdb, freq, factor; 376 size_t sample_size; 377 378 /* Calculate sampling buffers using 4K pages 379 * 380 * 1. Determine the sample data size which depends on the used 381 * sampling functions, for example, basic-sampling or 382 * basic-sampling with diagnostic-sampling. 383 * 384 * 2. Use the sampling frequency as input. The sampling buffer is 385 * designed for almost one second. This can be adjusted through 386 * the "factor" variable. 387 * In any case, alloc_sampling_buffer() sets the Alert Request 388 * Control indicator to trigger a measurement-alert to harvest 389 * sample-data-blocks (sdb). 390 * 391 * 3. Compute the number of sample-data-blocks and ensure a minimum 392 * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not 393 * exceed a "calculated" maximum. The symbolic maximum is 394 * designed for basic-sampling only and needs to be increased if 395 * diagnostic-sampling is active. 396 * See also the remarks for these symbolic constants. 397 * 398 * 4. Compute the number of sample-data-block-tables (SDBT) and 399 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up 400 * to 511 SDBs). 401 */ 402 sample_size = sizeof(struct hws_basic_entry); 403 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); 404 factor = 1; 405 n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); 406 if (n_sdb < CPUM_SF_MIN_SDB) 407 n_sdb = CPUM_SF_MIN_SDB; 408 409 /* If there is already a sampling buffer allocated, it is very likely 410 * that the sampling facility is enabled too. If the event to be 411 * initialized requires a greater sampling buffer, the allocation must 412 * be postponed. Changing the sampling buffer requires the sampling 413 * facility to be in the disabled state. So, account the number of 414 * required SDBs and let cpumsf_pmu_enable() resize the buffer just 415 * before the event is started. 416 */ 417 sfb_init_allocs(n_sdb, hwc); 418 if (sf_buffer_available(cpuhw)) 419 return 0; 420 421 debug_sprintf_event(sfdbg, 3, 422 "%s: rate %lu f %lu sdb %lu/%lu" 423 " sample_size %lu cpuhw %p\n", __func__, 424 SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), 425 sample_size, cpuhw); 426 427 return alloc_sampling_buffer(&cpuhw->sfb, 428 sfb_pending_allocs(&cpuhw->sfb, hwc)); 429 } 430 431 static unsigned long min_percent(unsigned int percent, unsigned long base, 432 unsigned long min) 433 { 434 return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); 435 } 436 437 static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) 438 { 439 /* Use a percentage-based approach to extend the sampling facility 440 * buffer. Accept up to 5% sample data loss. 441 * Vary the extents between 1% to 5% of the current number of 442 * sample-data-blocks. 443 */ 444 if (ratio <= 5) 445 return 0; 446 if (ratio <= 25) 447 return min_percent(1, base, 1); 448 if (ratio <= 50) 449 return min_percent(1, base, 1); 450 if (ratio <= 75) 451 return min_percent(2, base, 2); 452 if (ratio <= 100) 453 return min_percent(3, base, 3); 454 if (ratio <= 250) 455 return min_percent(4, base, 4); 456 457 return min_percent(5, base, 8); 458 } 459 460 static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, 461 struct hw_perf_event *hwc) 462 { 463 unsigned long ratio, num; 464 465 if (!OVERFLOW_REG(hwc)) 466 return; 467 468 /* The sample_overflow contains the average number of sample data 469 * that has been lost because sample-data-blocks were full. 470 * 471 * Calculate the total number of sample data entries that has been 472 * discarded. Then calculate the ratio of lost samples to total samples 473 * per second in percent. 474 */ 475 ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, 476 sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); 477 478 /* Compute number of sample-data-blocks */ 479 num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); 480 if (num) 481 sfb_account_allocs(num, hwc); 482 483 debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", 484 __func__, OVERFLOW_REG(hwc), ratio, num); 485 OVERFLOW_REG(hwc) = 0; 486 } 487 488 /* extend_sampling_buffer() - Extend sampling buffer 489 * @sfb: Sampling buffer structure (for local CPU) 490 * @hwc: Perf event hardware structure 491 * 492 * Use this function to extend the sampling buffer based on the overflow counter 493 * and postponed allocation extents stored in the specified Perf event hardware. 494 * 495 * Important: This function disables the sampling facility in order to safely 496 * change the sampling buffer structure. Do not call this function 497 * when the PMU is active. 498 */ 499 static void extend_sampling_buffer(struct sf_buffer *sfb, 500 struct hw_perf_event *hwc) 501 { 502 unsigned long num, num_old; 503 int rc; 504 505 num = sfb_pending_allocs(sfb, hwc); 506 if (!num) 507 return; 508 num_old = sfb->num_sdb; 509 510 /* Disable the sampling facility to reset any states and also 511 * clear pending measurement alerts. 512 */ 513 sf_disable(); 514 515 /* Extend the sampling buffer. 516 * This memory allocation typically happens in an atomic context when 517 * called by perf. Because this is a reallocation, it is fine if the 518 * new SDB-request cannot be satisfied immediately. 519 */ 520 rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); 521 if (rc) 522 debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n", 523 __func__, rc); 524 525 if (sfb_has_pending_allocs(sfb, hwc)) 526 debug_sprintf_event(sfdbg, 5, "%s: " 527 "req %lu alloc %lu remaining %lu\n", 528 __func__, num, sfb->num_sdb - num_old, 529 sfb_pending_allocs(sfb, hwc)); 530 } 531 532 /* Number of perf events counting hardware events */ 533 static atomic_t num_events; 534 /* Used to avoid races in calling reserve/release_cpumf_hardware */ 535 static DEFINE_MUTEX(pmc_reserve_mutex); 536 537 #define PMC_INIT 0 538 #define PMC_RELEASE 1 539 #define PMC_FAILURE 2 540 static void setup_pmc_cpu(void *flags) 541 { 542 int err; 543 struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); 544 545 err = 0; 546 switch (*((int *) flags)) { 547 case PMC_INIT: 548 memset(cpusf, 0, sizeof(*cpusf)); 549 err = qsi(&cpusf->qsi); 550 if (err) 551 break; 552 cpusf->flags |= PMU_F_RESERVED; 553 err = sf_disable(); 554 if (err) 555 pr_err("Switching off the sampling facility failed " 556 "with rc %i\n", err); 557 debug_sprintf_event(sfdbg, 5, 558 "%s: initialized: cpuhw %p\n", __func__, 559 cpusf); 560 break; 561 case PMC_RELEASE: 562 cpusf->flags &= ~PMU_F_RESERVED; 563 err = sf_disable(); 564 if (err) { 565 pr_err("Switching off the sampling facility failed " 566 "with rc %i\n", err); 567 } else 568 deallocate_buffers(cpusf); 569 debug_sprintf_event(sfdbg, 5, 570 "%s: released: cpuhw %p\n", __func__, 571 cpusf); 572 break; 573 } 574 if (err) 575 *((int *) flags) |= PMC_FAILURE; 576 } 577 578 static void release_pmc_hardware(void) 579 { 580 int flags = PMC_RELEASE; 581 582 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 583 on_each_cpu(setup_pmc_cpu, &flags, 1); 584 } 585 586 static int reserve_pmc_hardware(void) 587 { 588 int flags = PMC_INIT; 589 590 on_each_cpu(setup_pmc_cpu, &flags, 1); 591 if (flags & PMC_FAILURE) { 592 release_pmc_hardware(); 593 return -ENODEV; 594 } 595 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 596 597 return 0; 598 } 599 600 static void hw_perf_event_destroy(struct perf_event *event) 601 { 602 /* Release PMC if this is the last perf event */ 603 if (!atomic_add_unless(&num_events, -1, 1)) { 604 mutex_lock(&pmc_reserve_mutex); 605 if (atomic_dec_return(&num_events) == 0) 606 release_pmc_hardware(); 607 mutex_unlock(&pmc_reserve_mutex); 608 } 609 } 610 611 static void hw_init_period(struct hw_perf_event *hwc, u64 period) 612 { 613 hwc->sample_period = period; 614 hwc->last_period = hwc->sample_period; 615 local64_set(&hwc->period_left, hwc->sample_period); 616 } 617 618 static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, 619 unsigned long rate) 620 { 621 return clamp_t(unsigned long, rate, 622 si->min_sampl_rate, si->max_sampl_rate); 623 } 624 625 static u32 cpumsf_pid_type(struct perf_event *event, 626 u32 pid, enum pid_type type) 627 { 628 struct task_struct *tsk; 629 630 /* Idle process */ 631 if (!pid) 632 goto out; 633 634 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 635 pid = -1; 636 if (tsk) { 637 /* 638 * Only top level events contain the pid namespace in which 639 * they are created. 640 */ 641 if (event->parent) 642 event = event->parent; 643 pid = __task_pid_nr_ns(tsk, type, event->ns); 644 /* 645 * See also 1d953111b648 646 * "perf/core: Don't report zero PIDs for exiting tasks". 647 */ 648 if (!pid && !pid_alive(tsk)) 649 pid = -1; 650 } 651 out: 652 return pid; 653 } 654 655 static void cpumsf_output_event_pid(struct perf_event *event, 656 struct perf_sample_data *data, 657 struct pt_regs *regs) 658 { 659 u32 pid; 660 struct perf_event_header header; 661 struct perf_output_handle handle; 662 663 /* 664 * Obtain the PID from the basic-sampling data entry and 665 * correct the data->tid_entry.pid value. 666 */ 667 pid = data->tid_entry.pid; 668 669 /* Protect callchain buffers, tasks */ 670 rcu_read_lock(); 671 672 perf_prepare_sample(&header, data, event, regs); 673 if (perf_output_begin(&handle, event, header.size)) 674 goto out; 675 676 /* Update the process ID (see also kernel/events/core.c) */ 677 data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID); 678 data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID); 679 680 perf_output_sample(&handle, &header, data, event); 681 perf_output_end(&handle); 682 out: 683 rcu_read_unlock(); 684 } 685 686 static unsigned long getrate(bool freq, unsigned long sample, 687 struct hws_qsi_info_block *si) 688 { 689 unsigned long rate; 690 691 if (freq) { 692 rate = freq_to_sample_rate(si, sample); 693 rate = hw_limit_rate(si, rate); 694 } else { 695 /* The min/max sampling rates specifies the valid range 696 * of sample periods. If the specified sample period is 697 * out of range, limit the period to the range boundary. 698 */ 699 rate = hw_limit_rate(si, sample); 700 701 /* The perf core maintains a maximum sample rate that is 702 * configurable through the sysctl interface. Ensure the 703 * sampling rate does not exceed this value. This also helps 704 * to avoid throttling when pushing samples with 705 * perf_event_overflow(). 706 */ 707 if (sample_rate_to_freq(si, rate) > 708 sysctl_perf_event_sample_rate) { 709 debug_sprintf_event(sfdbg, 1, "%s: " 710 "Sampling rate exceeds maximum " 711 "perf sample rate\n", __func__); 712 rate = 0; 713 } 714 } 715 return rate; 716 } 717 718 /* The sampling information (si) contains information about the 719 * min/max sampling intervals and the CPU speed. So calculate the 720 * correct sampling interval and avoid the whole period adjust 721 * feedback loop. 722 * 723 * Since the CPU Measurement sampling facility can not handle frequency 724 * calculate the sampling interval when frequency is specified using 725 * this formula: 726 * interval := cpu_speed * 1000000 / sample_freq 727 * 728 * Returns errno on bad input and zero on success with parameter interval 729 * set to the correct sampling rate. 730 * 731 * Note: This function turns off freq bit to avoid calling function 732 * perf_adjust_period(). This causes frequency adjustment in the common 733 * code part which causes tremendous variations in the counter values. 734 */ 735 static int __hw_perf_event_init_rate(struct perf_event *event, 736 struct hws_qsi_info_block *si) 737 { 738 struct perf_event_attr *attr = &event->attr; 739 struct hw_perf_event *hwc = &event->hw; 740 unsigned long rate; 741 742 if (attr->freq) { 743 if (!attr->sample_freq) 744 return -EINVAL; 745 rate = getrate(attr->freq, attr->sample_freq, si); 746 attr->freq = 0; /* Don't call perf_adjust_period() */ 747 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE; 748 } else { 749 rate = getrate(attr->freq, attr->sample_period, si); 750 if (!rate) 751 return -EINVAL; 752 } 753 attr->sample_period = rate; 754 SAMPL_RATE(hwc) = rate; 755 hw_init_period(hwc, SAMPL_RATE(hwc)); 756 debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", 757 __func__, event->cpu, event->attr.sample_period, 758 event->attr.freq, SAMPLE_FREQ_MODE(hwc)); 759 return 0; 760 } 761 762 static int __hw_perf_event_init(struct perf_event *event) 763 { 764 struct cpu_hw_sf *cpuhw; 765 struct hws_qsi_info_block si; 766 struct perf_event_attr *attr = &event->attr; 767 struct hw_perf_event *hwc = &event->hw; 768 int cpu, err; 769 770 /* Reserve CPU-measurement sampling facility */ 771 err = 0; 772 if (!atomic_inc_not_zero(&num_events)) { 773 mutex_lock(&pmc_reserve_mutex); 774 if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) 775 err = -EBUSY; 776 else 777 atomic_inc(&num_events); 778 mutex_unlock(&pmc_reserve_mutex); 779 } 780 event->destroy = hw_perf_event_destroy; 781 782 if (err) 783 goto out; 784 785 /* Access per-CPU sampling information (query sampling info) */ 786 /* 787 * The event->cpu value can be -1 to count on every CPU, for example, 788 * when attaching to a task. If this is specified, use the query 789 * sampling info from the current CPU, otherwise use event->cpu to 790 * retrieve the per-CPU information. 791 * Later, cpuhw indicates whether to allocate sampling buffers for a 792 * particular CPU (cpuhw!=NULL) or each online CPU (cpuw==NULL). 793 */ 794 memset(&si, 0, sizeof(si)); 795 cpuhw = NULL; 796 if (event->cpu == -1) 797 qsi(&si); 798 else { 799 /* Event is pinned to a particular CPU, retrieve the per-CPU 800 * sampling structure for accessing the CPU-specific QSI. 801 */ 802 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 803 si = cpuhw->qsi; 804 } 805 806 /* Check sampling facility authorization and, if not authorized, 807 * fall back to other PMUs. It is safe to check any CPU because 808 * the authorization is identical for all configured CPUs. 809 */ 810 if (!si.as) { 811 err = -ENOENT; 812 goto out; 813 } 814 815 if (si.ribm & CPU_MF_SF_RIBM_NOTAV) { 816 pr_warn("CPU Measurement Facility sampling is temporarily not available\n"); 817 err = -EBUSY; 818 goto out; 819 } 820 821 /* Always enable basic sampling */ 822 SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; 823 824 /* Check if diagnostic sampling is requested. Deny if the required 825 * sampling authorization is missing. 826 */ 827 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { 828 if (!si.ad) { 829 err = -EPERM; 830 goto out; 831 } 832 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; 833 } 834 835 /* Check and set other sampling flags */ 836 if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) 837 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; 838 839 err = __hw_perf_event_init_rate(event, &si); 840 if (err) 841 goto out; 842 843 /* Initialize sample data overflow accounting */ 844 hwc->extra_reg.reg = REG_OVERFLOW; 845 OVERFLOW_REG(hwc) = 0; 846 847 /* Use AUX buffer. No need to allocate it by ourself */ 848 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) 849 return 0; 850 851 /* Allocate the per-CPU sampling buffer using the CPU information 852 * from the event. If the event is not pinned to a particular 853 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling 854 * buffers for each online CPU. 855 */ 856 if (cpuhw) 857 /* Event is pinned to a particular CPU */ 858 err = allocate_buffers(cpuhw, hwc); 859 else { 860 /* Event is not pinned, allocate sampling buffer on 861 * each online CPU 862 */ 863 for_each_online_cpu(cpu) { 864 cpuhw = &per_cpu(cpu_hw_sf, cpu); 865 err = allocate_buffers(cpuhw, hwc); 866 if (err) 867 break; 868 } 869 } 870 871 /* If PID/TID sampling is active, replace the default overflow 872 * handler to extract and resolve the PIDs from the basic-sampling 873 * data entries. 874 */ 875 if (event->attr.sample_type & PERF_SAMPLE_TID) 876 if (is_default_overflow_handler(event)) 877 event->overflow_handler = cpumsf_output_event_pid; 878 out: 879 return err; 880 } 881 882 static int cpumsf_pmu_event_init(struct perf_event *event) 883 { 884 int err; 885 886 /* No support for taken branch sampling */ 887 if (has_branch_stack(event)) 888 return -EOPNOTSUPP; 889 890 switch (event->attr.type) { 891 case PERF_TYPE_RAW: 892 if ((event->attr.config != PERF_EVENT_CPUM_SF) && 893 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) 894 return -ENOENT; 895 break; 896 case PERF_TYPE_HARDWARE: 897 /* Support sampling of CPU cycles in addition to the 898 * counter facility. However, the counter facility 899 * is more precise and, hence, restrict this PMU to 900 * sampling events only. 901 */ 902 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) 903 return -ENOENT; 904 if (!is_sampling_event(event)) 905 return -ENOENT; 906 break; 907 default: 908 return -ENOENT; 909 } 910 911 /* Check online status of the CPU to which the event is pinned */ 912 if (event->cpu >= 0 && !cpu_online(event->cpu)) 913 return -ENODEV; 914 915 /* Force reset of idle/hv excludes regardless of what the 916 * user requested. 917 */ 918 if (event->attr.exclude_hv) 919 event->attr.exclude_hv = 0; 920 if (event->attr.exclude_idle) 921 event->attr.exclude_idle = 0; 922 923 err = __hw_perf_event_init(event); 924 if (unlikely(err)) 925 if (event->destroy) 926 event->destroy(event); 927 return err; 928 } 929 930 static void cpumsf_pmu_enable(struct pmu *pmu) 931 { 932 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 933 struct hw_perf_event *hwc; 934 int err; 935 936 if (cpuhw->flags & PMU_F_ENABLED) 937 return; 938 939 if (cpuhw->flags & PMU_F_ERR_MASK) 940 return; 941 942 /* Check whether to extent the sampling buffer. 943 * 944 * Two conditions trigger an increase of the sampling buffer for a 945 * perf event: 946 * 1. Postponed buffer allocations from the event initialization. 947 * 2. Sampling overflows that contribute to pending allocations. 948 * 949 * Note that the extend_sampling_buffer() function disables the sampling 950 * facility, but it can be fully re-enabled using sampling controls that 951 * have been saved in cpumsf_pmu_disable(). 952 */ 953 if (cpuhw->event) { 954 hwc = &cpuhw->event->hw; 955 if (!(SAMPL_DIAG_MODE(hwc))) { 956 /* 957 * Account number of overflow-designated 958 * buffer extents 959 */ 960 sfb_account_overflows(cpuhw, hwc); 961 extend_sampling_buffer(&cpuhw->sfb, hwc); 962 } 963 /* Rate may be adjusted with ioctl() */ 964 cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); 965 } 966 967 /* (Re)enable the PMU and sampling facility */ 968 cpuhw->flags |= PMU_F_ENABLED; 969 barrier(); 970 971 err = lsctl(&cpuhw->lsctl); 972 if (err) { 973 cpuhw->flags &= ~PMU_F_ENABLED; 974 pr_err("Loading sampling controls failed: op %i err %i\n", 975 1, err); 976 return; 977 } 978 979 /* Load current program parameter */ 980 lpp(&S390_lowcore.lpp); 981 982 debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " 983 "interval %#lx tear %#lx dear %#lx\n", __func__, 984 cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, 985 cpuhw->lsctl.cd, cpuhw->lsctl.interval, 986 cpuhw->lsctl.tear, cpuhw->lsctl.dear); 987 } 988 989 static void cpumsf_pmu_disable(struct pmu *pmu) 990 { 991 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 992 struct hws_lsctl_request_block inactive; 993 struct hws_qsi_info_block si; 994 int err; 995 996 if (!(cpuhw->flags & PMU_F_ENABLED)) 997 return; 998 999 if (cpuhw->flags & PMU_F_ERR_MASK) 1000 return; 1001 1002 /* Switch off sampling activation control */ 1003 inactive = cpuhw->lsctl; 1004 inactive.cs = 0; 1005 inactive.cd = 0; 1006 1007 err = lsctl(&inactive); 1008 if (err) { 1009 pr_err("Loading sampling controls failed: op %i err %i\n", 1010 2, err); 1011 return; 1012 } 1013 1014 /* Save state of TEAR and DEAR register contents */ 1015 err = qsi(&si); 1016 if (!err) { 1017 /* TEAR/DEAR values are valid only if the sampling facility is 1018 * enabled. Note that cpumsf_pmu_disable() might be called even 1019 * for a disabled sampling facility because cpumsf_pmu_enable() 1020 * controls the enable/disable state. 1021 */ 1022 if (si.es) { 1023 cpuhw->lsctl.tear = si.tear; 1024 cpuhw->lsctl.dear = si.dear; 1025 } 1026 } else 1027 debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", 1028 __func__, err); 1029 1030 cpuhw->flags &= ~PMU_F_ENABLED; 1031 } 1032 1033 /* perf_exclude_event() - Filter event 1034 * @event: The perf event 1035 * @regs: pt_regs structure 1036 * @sde_regs: Sample-data-entry (sde) regs structure 1037 * 1038 * Filter perf events according to their exclude specification. 1039 * 1040 * Return non-zero if the event shall be excluded. 1041 */ 1042 static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, 1043 struct perf_sf_sde_regs *sde_regs) 1044 { 1045 if (event->attr.exclude_user && user_mode(regs)) 1046 return 1; 1047 if (event->attr.exclude_kernel && !user_mode(regs)) 1048 return 1; 1049 if (event->attr.exclude_guest && sde_regs->in_guest) 1050 return 1; 1051 if (event->attr.exclude_host && !sde_regs->in_guest) 1052 return 1; 1053 return 0; 1054 } 1055 1056 /* perf_push_sample() - Push samples to perf 1057 * @event: The perf event 1058 * @sample: Hardware sample data 1059 * 1060 * Use the hardware sample data to create perf event sample. The sample 1061 * is the pushed to the event subsystem and the function checks for 1062 * possible event overflows. If an event overflow occurs, the PMU is 1063 * stopped. 1064 * 1065 * Return non-zero if an event overflow occurred. 1066 */ 1067 static int perf_push_sample(struct perf_event *event, 1068 struct hws_basic_entry *basic) 1069 { 1070 int overflow; 1071 struct pt_regs regs; 1072 struct perf_sf_sde_regs *sde_regs; 1073 struct perf_sample_data data; 1074 1075 /* Setup perf sample */ 1076 perf_sample_data_init(&data, 0, event->hw.last_period); 1077 1078 /* Setup pt_regs to look like an CPU-measurement external interrupt 1079 * using the Program Request Alert code. The regs.int_parm_long 1080 * field which is unused contains additional sample-data-entry related 1081 * indicators. 1082 */ 1083 memset(®s, 0, sizeof(regs)); 1084 regs.int_code = 0x1407; 1085 regs.int_parm = CPU_MF_INT_SF_PRA; 1086 sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; 1087 1088 psw_bits(regs.psw).ia = basic->ia; 1089 psw_bits(regs.psw).dat = basic->T; 1090 psw_bits(regs.psw).wait = basic->W; 1091 psw_bits(regs.psw).pstate = basic->P; 1092 psw_bits(regs.psw).as = basic->AS; 1093 1094 /* 1095 * Use the hardware provided configuration level to decide if the 1096 * sample belongs to a guest or host. If that is not available, 1097 * fall back to the following heuristics: 1098 * A non-zero guest program parameter always indicates a guest 1099 * sample. Some early samples or samples from guests without 1100 * lpp usage would be misaccounted to the host. We use the asn 1101 * value as an addon heuristic to detect most of these guest samples. 1102 * If the value differs from 0xffff (the host value), we assume to 1103 * be a KVM guest. 1104 */ 1105 switch (basic->CL) { 1106 case 1: /* logical partition */ 1107 sde_regs->in_guest = 0; 1108 break; 1109 case 2: /* virtual machine */ 1110 sde_regs->in_guest = 1; 1111 break; 1112 default: /* old machine, use heuristics */ 1113 if (basic->gpp || basic->prim_asn != 0xffff) 1114 sde_regs->in_guest = 1; 1115 break; 1116 } 1117 1118 /* 1119 * Store the PID value from the sample-data-entry to be 1120 * processed and resolved by cpumsf_output_event_pid(). 1121 */ 1122 data.tid_entry.pid = basic->hpp & LPP_PID_MASK; 1123 1124 overflow = 0; 1125 if (perf_exclude_event(event, ®s, sde_regs)) 1126 goto out; 1127 if (perf_event_overflow(event, &data, ®s)) { 1128 overflow = 1; 1129 event->pmu->stop(event, 0); 1130 } 1131 perf_event_update_userpage(event); 1132 out: 1133 return overflow; 1134 } 1135 1136 static void perf_event_count_update(struct perf_event *event, u64 count) 1137 { 1138 local64_add(count, &event->count); 1139 } 1140 1141 /* hw_collect_samples() - Walk through a sample-data-block and collect samples 1142 * @event: The perf event 1143 * @sdbt: Sample-data-block table 1144 * @overflow: Event overflow counter 1145 * 1146 * Walks through a sample-data-block and collects sampling data entries that are 1147 * then pushed to the perf event subsystem. Depending on the sampling function, 1148 * there can be either basic-sampling or combined-sampling data entries. A 1149 * combined-sampling data entry consists of a basic- and a diagnostic-sampling 1150 * data entry. The sampling function is determined by the flags in the perf 1151 * event hardware structure. The function always works with a combined-sampling 1152 * data entry but ignores the the diagnostic portion if it is not available. 1153 * 1154 * Note that the implementation focuses on basic-sampling data entries and, if 1155 * such an entry is not valid, the entire combined-sampling data entry is 1156 * ignored. 1157 * 1158 * The overflow variables counts the number of samples that has been discarded 1159 * due to a perf event overflow. 1160 */ 1161 static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, 1162 unsigned long long *overflow) 1163 { 1164 struct hws_trailer_entry *te; 1165 struct hws_basic_entry *sample; 1166 1167 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); 1168 sample = (struct hws_basic_entry *) *sdbt; 1169 while ((unsigned long *) sample < (unsigned long *) te) { 1170 /* Check for an empty sample */ 1171 if (!sample->def) 1172 break; 1173 1174 /* Update perf event period */ 1175 perf_event_count_update(event, SAMPL_RATE(&event->hw)); 1176 1177 /* Check whether sample is valid */ 1178 if (sample->def == 0x0001) { 1179 /* If an event overflow occurred, the PMU is stopped to 1180 * throttle event delivery. Remaining sample data is 1181 * discarded. 1182 */ 1183 if (!*overflow) { 1184 /* Check whether sample is consistent */ 1185 if (sample->I == 0 && sample->W == 0) { 1186 /* Deliver sample data to perf */ 1187 *overflow = perf_push_sample(event, 1188 sample); 1189 } 1190 } else 1191 /* Count discarded samples */ 1192 *overflow += 1; 1193 } else { 1194 debug_sprintf_event(sfdbg, 4, 1195 "%s: Found unknown" 1196 " sampling data entry: te->f %i" 1197 " basic.def %#4x (%p)\n", __func__, 1198 te->f, sample->def, sample); 1199 /* Sample slot is not yet written or other record. 1200 * 1201 * This condition can occur if the buffer was reused 1202 * from a combined basic- and diagnostic-sampling. 1203 * If only basic-sampling is then active, entries are 1204 * written into the larger diagnostic entries. 1205 * This is typically the case for sample-data-blocks 1206 * that are not full. Stop processing if the first 1207 * invalid format was detected. 1208 */ 1209 if (!te->f) 1210 break; 1211 } 1212 1213 /* Reset sample slot and advance to next sample */ 1214 sample->def = 0; 1215 sample++; 1216 } 1217 } 1218 1219 /* hw_perf_event_update() - Process sampling buffer 1220 * @event: The perf event 1221 * @flush_all: Flag to also flush partially filled sample-data-blocks 1222 * 1223 * Processes the sampling buffer and create perf event samples. 1224 * The sampling buffer position are retrieved and saved in the TEAR_REG 1225 * register of the specified perf event. 1226 * 1227 * Only full sample-data-blocks are processed. Specify the flash_all flag 1228 * to also walk through partially filled sample-data-blocks. It is ignored 1229 * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag 1230 * enforces the processing of full sample-data-blocks only (trailer entries 1231 * with the block-full-indicator bit set). 1232 */ 1233 static void hw_perf_event_update(struct perf_event *event, int flush_all) 1234 { 1235 struct hw_perf_event *hwc = &event->hw; 1236 struct hws_trailer_entry *te; 1237 unsigned long *sdbt; 1238 unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; 1239 int done; 1240 1241 /* 1242 * AUX buffer is used when in diagnostic sampling mode. 1243 * No perf events/samples are created. 1244 */ 1245 if (SAMPL_DIAG_MODE(&event->hw)) 1246 return; 1247 1248 if (flush_all && SDB_FULL_BLOCKS(hwc)) 1249 flush_all = 0; 1250 1251 sdbt = (unsigned long *) TEAR_REG(hwc); 1252 done = event_overflow = sampl_overflow = num_sdb = 0; 1253 while (!done) { 1254 /* Get the trailer entry of the sample-data-block */ 1255 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); 1256 1257 /* Leave loop if no more work to do (block full indicator) */ 1258 if (!te->f) { 1259 done = 1; 1260 if (!flush_all) 1261 break; 1262 } 1263 1264 /* Check the sample overflow count */ 1265 if (te->overflow) 1266 /* Account sample overflows and, if a particular limit 1267 * is reached, extend the sampling buffer. 1268 * For details, see sfb_account_overflows(). 1269 */ 1270 sampl_overflow += te->overflow; 1271 1272 /* Timestamps are valid for full sample-data-blocks only */ 1273 debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " 1274 "overflow %llu timestamp %#llx\n", 1275 __func__, (unsigned long)sdbt, te->overflow, 1276 (te->f) ? trailer_timestamp(te) : 0ULL); 1277 1278 /* Collect all samples from a single sample-data-block and 1279 * flag if an (perf) event overflow happened. If so, the PMU 1280 * is stopped and remaining samples will be discarded. 1281 */ 1282 hw_collect_samples(event, sdbt, &event_overflow); 1283 num_sdb++; 1284 1285 /* Reset trailer (using compare-double-and-swap) */ 1286 do { 1287 te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; 1288 te_flags |= SDB_TE_ALERT_REQ_MASK; 1289 } while (!cmpxchg_double(&te->flags, &te->overflow, 1290 te->flags, te->overflow, 1291 te_flags, 0ULL)); 1292 1293 /* Advance to next sample-data-block */ 1294 sdbt++; 1295 if (is_link_entry(sdbt)) 1296 sdbt = get_next_sdbt(sdbt); 1297 1298 /* Update event hardware registers */ 1299 TEAR_REG(hwc) = (unsigned long) sdbt; 1300 1301 /* Stop processing sample-data if all samples of the current 1302 * sample-data-block were flushed even if it was not full. 1303 */ 1304 if (flush_all && done) 1305 break; 1306 1307 /* If an event overflow happened, discard samples by 1308 * processing any remaining sample-data-blocks. 1309 */ 1310 if (event_overflow) 1311 flush_all = 1; 1312 } 1313 1314 /* Account sample overflows in the event hardware structure */ 1315 if (sampl_overflow) 1316 OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + 1317 sampl_overflow, 1 + num_sdb); 1318 if (sampl_overflow || event_overflow) 1319 debug_sprintf_event(sfdbg, 4, "%s: " 1320 "overflows: sample %llu event %llu" 1321 " total %llu num_sdb %llu\n", 1322 __func__, sampl_overflow, event_overflow, 1323 OVERFLOW_REG(hwc), num_sdb); 1324 } 1325 1326 #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb) 1327 #define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0) 1328 #define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark) 1329 #define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark) 1330 1331 /* 1332 * Get trailer entry by index of SDB. 1333 */ 1334 static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux, 1335 unsigned long index) 1336 { 1337 unsigned long sdb; 1338 1339 index = AUX_SDB_INDEX(aux, index); 1340 sdb = aux->sdb_index[index]; 1341 return (struct hws_trailer_entry *)trailer_entry_ptr(sdb); 1342 } 1343 1344 /* 1345 * Finish sampling on the cpu. Called by cpumsf_pmu_del() with pmu 1346 * disabled. Collect the full SDBs in AUX buffer which have not reached 1347 * the point of alert indicator. And ignore the SDBs which are not 1348 * full. 1349 * 1350 * 1. Scan SDBs to see how much data is there and consume them. 1351 * 2. Remove alert indicator in the buffer. 1352 */ 1353 static void aux_output_end(struct perf_output_handle *handle) 1354 { 1355 unsigned long i, range_scan, idx; 1356 struct aux_buffer *aux; 1357 struct hws_trailer_entry *te; 1358 1359 aux = perf_get_aux(handle); 1360 if (!aux) 1361 return; 1362 1363 range_scan = AUX_SDB_NUM_ALERT(aux); 1364 for (i = 0, idx = aux->head; i < range_scan; i++, idx++) { 1365 te = aux_sdb_trailer(aux, idx); 1366 if (!(te->flags & SDB_TE_BUFFER_FULL_MASK)) 1367 break; 1368 } 1369 /* i is num of SDBs which are full */ 1370 perf_aux_output_end(handle, i << PAGE_SHIFT); 1371 1372 /* Remove alert indicators in the buffer */ 1373 te = aux_sdb_trailer(aux, aux->alert_mark); 1374 te->flags &= ~SDB_TE_ALERT_REQ_MASK; 1375 1376 debug_sprintf_event(sfdbg, 6, "%s: collect %#lx SDBs\n", __func__, i); 1377 } 1378 1379 /* 1380 * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event 1381 * is first added to the CPU or rescheduled again to the CPU. It is called 1382 * with pmu disabled. 1383 * 1384 * 1. Reset the trailer of SDBs to get ready for new data. 1385 * 2. Tell the hardware where to put the data by reset the SDBs buffer 1386 * head(tear/dear). 1387 */ 1388 static int aux_output_begin(struct perf_output_handle *handle, 1389 struct aux_buffer *aux, 1390 struct cpu_hw_sf *cpuhw) 1391 { 1392 unsigned long range; 1393 unsigned long i, range_scan, idx; 1394 unsigned long head, base, offset; 1395 struct hws_trailer_entry *te; 1396 1397 if (WARN_ON_ONCE(handle->head & ~PAGE_MASK)) 1398 return -EINVAL; 1399 1400 aux->head = handle->head >> PAGE_SHIFT; 1401 range = (handle->size + 1) >> PAGE_SHIFT; 1402 if (range <= 1) 1403 return -ENOMEM; 1404 1405 /* 1406 * SDBs between aux->head and aux->empty_mark are already ready 1407 * for new data. range_scan is num of SDBs not within them. 1408 */ 1409 if (range > AUX_SDB_NUM_EMPTY(aux)) { 1410 range_scan = range - AUX_SDB_NUM_EMPTY(aux); 1411 idx = aux->empty_mark + 1; 1412 for (i = 0; i < range_scan; i++, idx++) { 1413 te = aux_sdb_trailer(aux, idx); 1414 te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; 1415 te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK; 1416 te->overflow = 0; 1417 } 1418 /* Save the position of empty SDBs */ 1419 aux->empty_mark = aux->head + range - 1; 1420 } 1421 1422 /* Set alert indicator */ 1423 aux->alert_mark = aux->head + range/2 - 1; 1424 te = aux_sdb_trailer(aux, aux->alert_mark); 1425 te->flags = te->flags | SDB_TE_ALERT_REQ_MASK; 1426 1427 /* Reset hardware buffer head */ 1428 head = AUX_SDB_INDEX(aux, aux->head); 1429 base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE]; 1430 offset = head % CPUM_SF_SDB_PER_TABLE; 1431 cpuhw->lsctl.tear = base + offset * sizeof(unsigned long); 1432 cpuhw->lsctl.dear = aux->sdb_index[head]; 1433 1434 debug_sprintf_event(sfdbg, 6, "%s: " 1435 "head->alert_mark->empty_mark (num_alert, range)" 1436 "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) " 1437 "tear index %#lx, tear %#lx dear %#lx\n", __func__, 1438 aux->head, aux->alert_mark, aux->empty_mark, 1439 AUX_SDB_NUM_ALERT(aux), range, 1440 head / CPUM_SF_SDB_PER_TABLE, 1441 cpuhw->lsctl.tear, 1442 cpuhw->lsctl.dear); 1443 1444 return 0; 1445 } 1446 1447 /* 1448 * Set alert indicator on SDB at index @alert_index while sampler is running. 1449 * 1450 * Return true if successfully. 1451 * Return false if full indicator is already set by hardware sampler. 1452 */ 1453 static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, 1454 unsigned long long *overflow) 1455 { 1456 unsigned long long orig_overflow, orig_flags, new_flags; 1457 struct hws_trailer_entry *te; 1458 1459 te = aux_sdb_trailer(aux, alert_index); 1460 do { 1461 orig_flags = te->flags; 1462 orig_overflow = te->overflow; 1463 *overflow = orig_overflow; 1464 if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { 1465 /* 1466 * SDB is already set by hardware. 1467 * Abort and try to set somewhere 1468 * behind. 1469 */ 1470 return false; 1471 } 1472 new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK; 1473 } while (!cmpxchg_double(&te->flags, &te->overflow, 1474 orig_flags, orig_overflow, 1475 new_flags, 0ULL)); 1476 return true; 1477 } 1478 1479 /* 1480 * aux_reset_buffer() - Scan and setup SDBs for new samples 1481 * @aux: The AUX buffer to set 1482 * @range: The range of SDBs to scan started from aux->head 1483 * @overflow: Set to overflow count 1484 * 1485 * Set alert indicator on the SDB at index of aux->alert_mark. If this SDB is 1486 * marked as empty, check if it is already set full by the hardware sampler. 1487 * If yes, that means new data is already there before we can set an alert 1488 * indicator. Caller should try to set alert indicator to some position behind. 1489 * 1490 * Scan the SDBs in AUX buffer from behind aux->empty_mark. They are used 1491 * previously and have already been consumed by user space. Reset these SDBs 1492 * (clear full indicator and alert indicator) for new data. 1493 * If aux->alert_mark fall in this area, just set it. Overflow count is 1494 * recorded while scanning. 1495 * 1496 * SDBs between aux->head and aux->empty_mark are already reset at last time. 1497 * and ready for new samples. So scanning on this area could be skipped. 1498 * 1499 * Return true if alert indicator is set successfully and false if not. 1500 */ 1501 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, 1502 unsigned long long *overflow) 1503 { 1504 unsigned long long orig_overflow, orig_flags, new_flags; 1505 unsigned long i, range_scan, idx; 1506 struct hws_trailer_entry *te; 1507 1508 if (range <= AUX_SDB_NUM_EMPTY(aux)) 1509 /* 1510 * No need to scan. All SDBs in range are marked as empty. 1511 * Just set alert indicator. Should check race with hardware 1512 * sampler. 1513 */ 1514 return aux_set_alert(aux, aux->alert_mark, overflow); 1515 1516 if (aux->alert_mark <= aux->empty_mark) 1517 /* 1518 * Set alert indicator on empty SDB. Should check race 1519 * with hardware sampler. 1520 */ 1521 if (!aux_set_alert(aux, aux->alert_mark, overflow)) 1522 return false; 1523 1524 /* 1525 * Scan the SDBs to clear full and alert indicator used previously. 1526 * Start scanning from one SDB behind empty_mark. If the new alert 1527 * indicator fall into this range, set it. 1528 */ 1529 range_scan = range - AUX_SDB_NUM_EMPTY(aux); 1530 idx = aux->empty_mark + 1; 1531 for (i = 0; i < range_scan; i++, idx++) { 1532 te = aux_sdb_trailer(aux, idx); 1533 do { 1534 orig_flags = te->flags; 1535 orig_overflow = te->overflow; 1536 new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK; 1537 if (idx == aux->alert_mark) 1538 new_flags |= SDB_TE_ALERT_REQ_MASK; 1539 else 1540 new_flags &= ~SDB_TE_ALERT_REQ_MASK; 1541 } while (!cmpxchg_double(&te->flags, &te->overflow, 1542 orig_flags, orig_overflow, 1543 new_flags, 0ULL)); 1544 *overflow += orig_overflow; 1545 } 1546 1547 /* Update empty_mark to new position */ 1548 aux->empty_mark = aux->head + range - 1; 1549 1550 return true; 1551 } 1552 1553 /* 1554 * Measurement alert handler for diagnostic mode sampling. 1555 */ 1556 static void hw_collect_aux(struct cpu_hw_sf *cpuhw) 1557 { 1558 struct aux_buffer *aux; 1559 int done = 0; 1560 unsigned long range = 0, size; 1561 unsigned long long overflow = 0; 1562 struct perf_output_handle *handle = &cpuhw->handle; 1563 unsigned long num_sdb; 1564 1565 aux = perf_get_aux(handle); 1566 if (WARN_ON_ONCE(!aux)) 1567 return; 1568 1569 /* Inform user space new data arrived */ 1570 size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; 1571 perf_aux_output_end(handle, size); 1572 num_sdb = aux->sfb.num_sdb; 1573 1574 while (!done) { 1575 /* Get an output handle */ 1576 aux = perf_aux_output_begin(handle, cpuhw->event); 1577 if (handle->size == 0) { 1578 pr_err("The AUX buffer with %lu pages for the " 1579 "diagnostic-sampling mode is full\n", 1580 num_sdb); 1581 debug_sprintf_event(sfdbg, 1, 1582 "%s: AUX buffer used up\n", 1583 __func__); 1584 break; 1585 } 1586 if (WARN_ON_ONCE(!aux)) 1587 return; 1588 1589 /* Update head and alert_mark to new position */ 1590 aux->head = handle->head >> PAGE_SHIFT; 1591 range = (handle->size + 1) >> PAGE_SHIFT; 1592 if (range == 1) 1593 aux->alert_mark = aux->head; 1594 else 1595 aux->alert_mark = aux->head + range/2 - 1; 1596 1597 if (aux_reset_buffer(aux, range, &overflow)) { 1598 if (!overflow) { 1599 done = 1; 1600 break; 1601 } 1602 size = range << PAGE_SHIFT; 1603 perf_aux_output_end(&cpuhw->handle, size); 1604 pr_err("Sample data caused the AUX buffer with %lu " 1605 "pages to overflow\n", num_sdb); 1606 debug_sprintf_event(sfdbg, 1, "%s: head %#lx range %#lx " 1607 "overflow %#llx\n", __func__, 1608 aux->head, range, overflow); 1609 } else { 1610 size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; 1611 perf_aux_output_end(&cpuhw->handle, size); 1612 debug_sprintf_event(sfdbg, 6, "%s: head %#lx alert %#lx " 1613 "already full, try another\n", 1614 __func__, 1615 aux->head, aux->alert_mark); 1616 } 1617 } 1618 1619 if (done) 1620 debug_sprintf_event(sfdbg, 6, "%s: aux_reset_buffer " 1621 "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n", 1622 __func__, aux->head, aux->alert_mark, 1623 aux->empty_mark, AUX_SDB_NUM_ALERT(aux), 1624 range); 1625 } 1626 1627 /* 1628 * Callback when freeing AUX buffers. 1629 */ 1630 static void aux_buffer_free(void *data) 1631 { 1632 struct aux_buffer *aux = data; 1633 unsigned long i, num_sdbt; 1634 1635 if (!aux) 1636 return; 1637 1638 /* Free SDBT. SDB is freed by the caller */ 1639 num_sdbt = aux->sfb.num_sdbt; 1640 for (i = 0; i < num_sdbt; i++) 1641 free_page(aux->sdbt_index[i]); 1642 1643 kfree(aux->sdbt_index); 1644 kfree(aux->sdb_index); 1645 kfree(aux); 1646 1647 debug_sprintf_event(sfdbg, 4, "%s: free " 1648 "%lu SDBTs\n", __func__, num_sdbt); 1649 } 1650 1651 static void aux_sdb_init(unsigned long sdb) 1652 { 1653 struct hws_trailer_entry *te; 1654 1655 te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb); 1656 1657 /* Save clock base */ 1658 te->clock_base = 1; 1659 memcpy(&te->progusage2, &tod_clock_base[1], 8); 1660 } 1661 1662 /* 1663 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling 1664 * @event: Event the buffer is setup for, event->cpu == -1 means current 1665 * @pages: Array of pointers to buffer pages passed from perf core 1666 * @nr_pages: Total pages 1667 * @snapshot: Flag for snapshot mode 1668 * 1669 * This is the callback when setup an event using AUX buffer. Perf tool can 1670 * trigger this by an additional mmap() call on the event. Unlike the buffer 1671 * for basic samples, AUX buffer belongs to the event. It is scheduled with 1672 * the task among online cpus when it is a per-thread event. 1673 * 1674 * Return the private AUX buffer structure if success or NULL if fails. 1675 */ 1676 static void *aux_buffer_setup(struct perf_event *event, void **pages, 1677 int nr_pages, bool snapshot) 1678 { 1679 struct sf_buffer *sfb; 1680 struct aux_buffer *aux; 1681 unsigned long *new, *tail; 1682 int i, n_sdbt; 1683 1684 if (!nr_pages || !pages) 1685 return NULL; 1686 1687 if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) { 1688 pr_err("AUX buffer size (%i pages) is larger than the " 1689 "maximum sampling buffer limit\n", 1690 nr_pages); 1691 return NULL; 1692 } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) { 1693 pr_err("AUX buffer size (%i pages) is less than the " 1694 "minimum sampling buffer limit\n", 1695 nr_pages); 1696 return NULL; 1697 } 1698 1699 /* Allocate aux_buffer struct for the event */ 1700 aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL); 1701 if (!aux) 1702 goto no_aux; 1703 sfb = &aux->sfb; 1704 1705 /* Allocate sdbt_index for fast reference */ 1706 n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE; 1707 aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL); 1708 if (!aux->sdbt_index) 1709 goto no_sdbt_index; 1710 1711 /* Allocate sdb_index for fast reference */ 1712 aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL); 1713 if (!aux->sdb_index) 1714 goto no_sdb_index; 1715 1716 /* Allocate the first SDBT */ 1717 sfb->num_sdbt = 0; 1718 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1719 if (!sfb->sdbt) 1720 goto no_sdbt; 1721 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt; 1722 tail = sfb->tail = sfb->sdbt; 1723 1724 /* 1725 * Link the provided pages of AUX buffer to SDBT. 1726 * Allocate SDBT if needed. 1727 */ 1728 for (i = 0; i < nr_pages; i++, tail++) { 1729 if (require_table_link(tail)) { 1730 new = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1731 if (!new) 1732 goto no_sdbt; 1733 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new; 1734 /* Link current page to tail of chain */ 1735 *tail = (unsigned long)(void *) new + 1; 1736 tail = new; 1737 } 1738 /* Tail is the entry in a SDBT */ 1739 *tail = (unsigned long)pages[i]; 1740 aux->sdb_index[i] = (unsigned long)pages[i]; 1741 aux_sdb_init((unsigned long)pages[i]); 1742 } 1743 sfb->num_sdb = nr_pages; 1744 1745 /* Link the last entry in the SDBT to the first SDBT */ 1746 *tail = (unsigned long) sfb->sdbt + 1; 1747 sfb->tail = tail; 1748 1749 /* 1750 * Initial all SDBs are zeroed. Mark it as empty. 1751 * So there is no need to clear the full indicator 1752 * when this event is first added. 1753 */ 1754 aux->empty_mark = sfb->num_sdb - 1; 1755 1756 debug_sprintf_event(sfdbg, 4, "%s: setup %lu SDBTs and %lu SDBs\n", 1757 __func__, sfb->num_sdbt, sfb->num_sdb); 1758 1759 return aux; 1760 1761 no_sdbt: 1762 /* SDBs (AUX buffer pages) are freed by caller */ 1763 for (i = 0; i < sfb->num_sdbt; i++) 1764 free_page(aux->sdbt_index[i]); 1765 kfree(aux->sdb_index); 1766 no_sdb_index: 1767 kfree(aux->sdbt_index); 1768 no_sdbt_index: 1769 kfree(aux); 1770 no_aux: 1771 return NULL; 1772 } 1773 1774 static void cpumsf_pmu_read(struct perf_event *event) 1775 { 1776 /* Nothing to do ... updates are interrupt-driven */ 1777 } 1778 1779 /* Check if the new sampling period/freqeuncy is appropriate. 1780 * 1781 * Return non-zero on error and zero on passed checks. 1782 */ 1783 static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) 1784 { 1785 struct hws_qsi_info_block si; 1786 unsigned long rate; 1787 bool do_freq; 1788 1789 memset(&si, 0, sizeof(si)); 1790 if (event->cpu == -1) { 1791 if (qsi(&si)) 1792 return -ENODEV; 1793 } else { 1794 /* Event is pinned to a particular CPU, retrieve the per-CPU 1795 * sampling structure for accessing the CPU-specific QSI. 1796 */ 1797 struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 1798 1799 si = cpuhw->qsi; 1800 } 1801 1802 do_freq = !!SAMPLE_FREQ_MODE(&event->hw); 1803 rate = getrate(do_freq, value, &si); 1804 if (!rate) 1805 return -EINVAL; 1806 1807 event->attr.sample_period = rate; 1808 SAMPL_RATE(&event->hw) = rate; 1809 hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); 1810 debug_sprintf_event(sfdbg, 4, "%s:" 1811 " cpu %d value %#llx period %#llx freq %d\n", 1812 __func__, event->cpu, value, 1813 event->attr.sample_period, do_freq); 1814 return 0; 1815 } 1816 1817 /* Activate sampling control. 1818 * Next call of pmu_enable() starts sampling. 1819 */ 1820 static void cpumsf_pmu_start(struct perf_event *event, int flags) 1821 { 1822 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1823 1824 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) 1825 return; 1826 1827 if (flags & PERF_EF_RELOAD) 1828 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); 1829 1830 perf_pmu_disable(event->pmu); 1831 event->hw.state = 0; 1832 cpuhw->lsctl.cs = 1; 1833 if (SAMPL_DIAG_MODE(&event->hw)) 1834 cpuhw->lsctl.cd = 1; 1835 perf_pmu_enable(event->pmu); 1836 } 1837 1838 /* Deactivate sampling control. 1839 * Next call of pmu_enable() stops sampling. 1840 */ 1841 static void cpumsf_pmu_stop(struct perf_event *event, int flags) 1842 { 1843 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1844 1845 if (event->hw.state & PERF_HES_STOPPED) 1846 return; 1847 1848 perf_pmu_disable(event->pmu); 1849 cpuhw->lsctl.cs = 0; 1850 cpuhw->lsctl.cd = 0; 1851 event->hw.state |= PERF_HES_STOPPED; 1852 1853 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { 1854 hw_perf_event_update(event, 1); 1855 event->hw.state |= PERF_HES_UPTODATE; 1856 } 1857 perf_pmu_enable(event->pmu); 1858 } 1859 1860 static int cpumsf_pmu_add(struct perf_event *event, int flags) 1861 { 1862 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1863 struct aux_buffer *aux; 1864 int err; 1865 1866 if (cpuhw->flags & PMU_F_IN_USE) 1867 return -EAGAIN; 1868 1869 if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt) 1870 return -EINVAL; 1871 1872 err = 0; 1873 perf_pmu_disable(event->pmu); 1874 1875 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; 1876 1877 /* Set up sampling controls. Always program the sampling register 1878 * using the SDB-table start. Reset TEAR_REG event hardware register 1879 * that is used by hw_perf_event_update() to store the sampling buffer 1880 * position after samples have been flushed. 1881 */ 1882 cpuhw->lsctl.s = 0; 1883 cpuhw->lsctl.h = 1; 1884 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); 1885 if (!SAMPL_DIAG_MODE(&event->hw)) { 1886 cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; 1887 cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; 1888 TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt; 1889 } 1890 1891 /* Ensure sampling functions are in the disabled state. If disabled, 1892 * switch on sampling enable control. */ 1893 if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { 1894 err = -EAGAIN; 1895 goto out; 1896 } 1897 if (SAMPL_DIAG_MODE(&event->hw)) { 1898 aux = perf_aux_output_begin(&cpuhw->handle, event); 1899 if (!aux) { 1900 err = -EINVAL; 1901 goto out; 1902 } 1903 err = aux_output_begin(&cpuhw->handle, aux, cpuhw); 1904 if (err) 1905 goto out; 1906 cpuhw->lsctl.ed = 1; 1907 } 1908 cpuhw->lsctl.es = 1; 1909 1910 /* Set in_use flag and store event */ 1911 cpuhw->event = event; 1912 cpuhw->flags |= PMU_F_IN_USE; 1913 1914 if (flags & PERF_EF_START) 1915 cpumsf_pmu_start(event, PERF_EF_RELOAD); 1916 out: 1917 perf_event_update_userpage(event); 1918 perf_pmu_enable(event->pmu); 1919 return err; 1920 } 1921 1922 static void cpumsf_pmu_del(struct perf_event *event, int flags) 1923 { 1924 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1925 1926 perf_pmu_disable(event->pmu); 1927 cpumsf_pmu_stop(event, PERF_EF_UPDATE); 1928 1929 cpuhw->lsctl.es = 0; 1930 cpuhw->lsctl.ed = 0; 1931 cpuhw->flags &= ~PMU_F_IN_USE; 1932 cpuhw->event = NULL; 1933 1934 if (SAMPL_DIAG_MODE(&event->hw)) 1935 aux_output_end(&cpuhw->handle); 1936 perf_event_update_userpage(event); 1937 perf_pmu_enable(event->pmu); 1938 } 1939 1940 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); 1941 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); 1942 1943 /* Attribute list for CPU_SF. 1944 * 1945 * The availablitiy depends on the CPU_MF sampling facility authorization 1946 * for basic + diagnositic samples. This is determined at initialization 1947 * time by the sampling facility device driver. 1948 * If the authorization for basic samples is turned off, it should be 1949 * also turned off for diagnostic sampling. 1950 * 1951 * During initialization of the device driver, check the authorization 1952 * level for diagnostic sampling and installs the attribute 1953 * file for diagnostic sampling if necessary. 1954 * 1955 * For now install a placeholder to reference all possible attributes: 1956 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG. 1957 * Add another entry for the final NULL pointer. 1958 */ 1959 enum { 1960 SF_CYCLES_BASIC_ATTR_IDX = 0, 1961 SF_CYCLES_BASIC_DIAG_ATTR_IDX, 1962 SF_CYCLES_ATTR_MAX 1963 }; 1964 1965 static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = { 1966 [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC) 1967 }; 1968 1969 PMU_FORMAT_ATTR(event, "config:0-63"); 1970 1971 static struct attribute *cpumsf_pmu_format_attr[] = { 1972 &format_attr_event.attr, 1973 NULL, 1974 }; 1975 1976 static struct attribute_group cpumsf_pmu_events_group = { 1977 .name = "events", 1978 .attrs = cpumsf_pmu_events_attr, 1979 }; 1980 1981 static struct attribute_group cpumsf_pmu_format_group = { 1982 .name = "format", 1983 .attrs = cpumsf_pmu_format_attr, 1984 }; 1985 1986 static const struct attribute_group *cpumsf_pmu_attr_groups[] = { 1987 &cpumsf_pmu_events_group, 1988 &cpumsf_pmu_format_group, 1989 NULL, 1990 }; 1991 1992 static struct pmu cpumf_sampling = { 1993 .pmu_enable = cpumsf_pmu_enable, 1994 .pmu_disable = cpumsf_pmu_disable, 1995 1996 .event_init = cpumsf_pmu_event_init, 1997 .add = cpumsf_pmu_add, 1998 .del = cpumsf_pmu_del, 1999 2000 .start = cpumsf_pmu_start, 2001 .stop = cpumsf_pmu_stop, 2002 .read = cpumsf_pmu_read, 2003 2004 .attr_groups = cpumsf_pmu_attr_groups, 2005 2006 .setup_aux = aux_buffer_setup, 2007 .free_aux = aux_buffer_free, 2008 2009 .check_period = cpumsf_pmu_check_period, 2010 }; 2011 2012 static void cpumf_measurement_alert(struct ext_code ext_code, 2013 unsigned int alert, unsigned long unused) 2014 { 2015 struct cpu_hw_sf *cpuhw; 2016 2017 if (!(alert & CPU_MF_INT_SF_MASK)) 2018 return; 2019 inc_irq_stat(IRQEXT_CMS); 2020 cpuhw = this_cpu_ptr(&cpu_hw_sf); 2021 2022 /* Measurement alerts are shared and might happen when the PMU 2023 * is not reserved. Ignore these alerts in this case. */ 2024 if (!(cpuhw->flags & PMU_F_RESERVED)) 2025 return; 2026 2027 /* The processing below must take care of multiple alert events that 2028 * might be indicated concurrently. */ 2029 2030 /* Program alert request */ 2031 if (alert & CPU_MF_INT_SF_PRA) { 2032 if (cpuhw->flags & PMU_F_IN_USE) 2033 if (SAMPL_DIAG_MODE(&cpuhw->event->hw)) 2034 hw_collect_aux(cpuhw); 2035 else 2036 hw_perf_event_update(cpuhw->event, 0); 2037 else 2038 WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); 2039 } 2040 2041 /* Report measurement alerts only for non-PRA codes */ 2042 if (alert != CPU_MF_INT_SF_PRA) 2043 debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__, 2044 alert); 2045 2046 /* Sampling authorization change request */ 2047 if (alert & CPU_MF_INT_SF_SACA) 2048 qsi(&cpuhw->qsi); 2049 2050 /* Loss of sample data due to high-priority machine activities */ 2051 if (alert & CPU_MF_INT_SF_LSDA) { 2052 pr_err("Sample data was lost\n"); 2053 cpuhw->flags |= PMU_F_ERR_LSDA; 2054 sf_disable(); 2055 } 2056 2057 /* Invalid sampling buffer entry */ 2058 if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { 2059 pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", 2060 alert); 2061 cpuhw->flags |= PMU_F_ERR_IBE; 2062 sf_disable(); 2063 } 2064 } 2065 2066 static int cpusf_pmu_setup(unsigned int cpu, int flags) 2067 { 2068 /* Ignore the notification if no events are scheduled on the PMU. 2069 * This might be racy... 2070 */ 2071 if (!atomic_read(&num_events)) 2072 return 0; 2073 2074 local_irq_disable(); 2075 setup_pmc_cpu(&flags); 2076 local_irq_enable(); 2077 return 0; 2078 } 2079 2080 static int s390_pmu_sf_online_cpu(unsigned int cpu) 2081 { 2082 return cpusf_pmu_setup(cpu, PMC_INIT); 2083 } 2084 2085 static int s390_pmu_sf_offline_cpu(unsigned int cpu) 2086 { 2087 return cpusf_pmu_setup(cpu, PMC_RELEASE); 2088 } 2089 2090 static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) 2091 { 2092 if (!cpum_sf_avail()) 2093 return -ENODEV; 2094 return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 2095 } 2096 2097 static int param_set_sfb_size(const char *val, const struct kernel_param *kp) 2098 { 2099 int rc; 2100 unsigned long min, max; 2101 2102 if (!cpum_sf_avail()) 2103 return -ENODEV; 2104 if (!val || !strlen(val)) 2105 return -EINVAL; 2106 2107 /* Valid parameter values: "min,max" or "max" */ 2108 min = CPUM_SF_MIN_SDB; 2109 max = CPUM_SF_MAX_SDB; 2110 if (strchr(val, ',')) 2111 rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL; 2112 else 2113 rc = kstrtoul(val, 10, &max); 2114 2115 if (min < 2 || min >= max || max > get_num_physpages()) 2116 rc = -EINVAL; 2117 if (rc) 2118 return rc; 2119 2120 sfb_set_limits(min, max); 2121 pr_info("The sampling buffer limits have changed to: " 2122 "min %lu max %lu (diag %lu)\n", 2123 CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); 2124 return 0; 2125 } 2126 2127 #define param_check_sfb_size(name, p) __param_check(name, p, void) 2128 static const struct kernel_param_ops param_ops_sfb_size = { 2129 .set = param_set_sfb_size, 2130 .get = param_get_sfb_size, 2131 }; 2132 2133 #define RS_INIT_FAILURE_QSI 0x0001 2134 #define RS_INIT_FAILURE_BSDES 0x0002 2135 #define RS_INIT_FAILURE_ALRT 0x0003 2136 #define RS_INIT_FAILURE_PERF 0x0004 2137 static void __init pr_cpumsf_err(unsigned int reason) 2138 { 2139 pr_err("Sampling facility support for perf is not available: " 2140 "reason %#x\n", reason); 2141 } 2142 2143 static int __init init_cpum_sampling_pmu(void) 2144 { 2145 struct hws_qsi_info_block si; 2146 int err; 2147 2148 if (!cpum_sf_avail()) 2149 return -ENODEV; 2150 2151 memset(&si, 0, sizeof(si)); 2152 if (qsi(&si)) { 2153 pr_cpumsf_err(RS_INIT_FAILURE_QSI); 2154 return -ENODEV; 2155 } 2156 2157 if (!si.as && !si.ad) 2158 return -ENODEV; 2159 2160 if (si.bsdes != sizeof(struct hws_basic_entry)) { 2161 pr_cpumsf_err(RS_INIT_FAILURE_BSDES); 2162 return -EINVAL; 2163 } 2164 2165 if (si.ad) { 2166 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 2167 /* Sampling of diagnostic data authorized, 2168 * install event into attribute list of PMU device. 2169 */ 2170 cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] = 2171 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); 2172 } 2173 2174 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); 2175 if (!sfdbg) { 2176 pr_err("Registering for s390dbf failed\n"); 2177 return -ENOMEM; 2178 } 2179 debug_register_view(sfdbg, &debug_sprintf_view); 2180 2181 err = register_external_irq(EXT_IRQ_MEASURE_ALERT, 2182 cpumf_measurement_alert); 2183 if (err) { 2184 pr_cpumsf_err(RS_INIT_FAILURE_ALRT); 2185 debug_unregister(sfdbg); 2186 goto out; 2187 } 2188 2189 err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); 2190 if (err) { 2191 pr_cpumsf_err(RS_INIT_FAILURE_PERF); 2192 unregister_external_irq(EXT_IRQ_MEASURE_ALERT, 2193 cpumf_measurement_alert); 2194 debug_unregister(sfdbg); 2195 goto out; 2196 } 2197 2198 cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online", 2199 s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu); 2200 out: 2201 return err; 2202 } 2203 2204 arch_initcall(init_cpum_sampling_pmu); 2205 core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); 2206