1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Performance event support for the System z CPU-measurement Sampling Facility 4 * 5 * Copyright IBM Corp. 2013, 2018 6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7 */ 8 #define KMSG_COMPONENT "cpum_sf" 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/kernel_stat.h> 13 #include <linux/perf_event.h> 14 #include <linux/percpu.h> 15 #include <linux/pid.h> 16 #include <linux/notifier.h> 17 #include <linux/export.h> 18 #include <linux/slab.h> 19 #include <linux/mm.h> 20 #include <linux/moduleparam.h> 21 #include <asm/cpu_mf.h> 22 #include <asm/irq.h> 23 #include <asm/debug.h> 24 #include <asm/timex.h> 25 26 /* Minimum number of sample-data-block-tables: 27 * At least one table is required for the sampling buffer structure. 28 * A single table contains up to 511 pointers to sample-data-blocks. 29 */ 30 #define CPUM_SF_MIN_SDBT 1 31 32 /* Number of sample-data-blocks per sample-data-block-table (SDBT): 33 * A table contains SDB pointers (8 bytes) and one table-link entry 34 * that points to the origin of the next SDBT. 35 */ 36 #define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) 37 38 /* Maximum page offset for an SDBT table-link entry: 39 * If this page offset is reached, a table-link entry to the next SDBT 40 * must be added. 41 */ 42 #define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) 43 static inline int require_table_link(const void *sdbt) 44 { 45 return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; 46 } 47 48 /* Minimum and maximum sampling buffer sizes: 49 * 50 * This number represents the maximum size of the sampling buffer taking 51 * the number of sample-data-block-tables into account. Note that these 52 * numbers apply to the basic-sampling function only. 53 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if 54 * the diagnostic-sampling function is active. 
55 * 56 * Sampling buffer size Buffer characteristics 57 * --------------------------------------------------- 58 * 64KB == 16 pages (4KB per page) 59 * 1 page for SDB-tables 60 * 15 pages for SDBs 61 * 62 * 32MB == 8192 pages (4KB per page) 63 * 16 pages for SDB-tables 64 * 8176 pages for SDBs 65 */ 66 static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; 67 static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; 68 static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; 69 70 struct sf_buffer { 71 unsigned long *sdbt; /* Sample-data-block-table origin */ 72 /* buffer characteristics (required for buffer increments) */ 73 unsigned long num_sdb; /* Number of sample-data-blocks */ 74 unsigned long num_sdbt; /* Number of sample-data-block-tables */ 75 unsigned long *tail; /* last sample-data-block-table */ 76 }; 77 78 struct aux_buffer { 79 struct sf_buffer sfb; 80 unsigned long head; /* index of SDB of buffer head */ 81 unsigned long alert_mark; /* index of SDB of alert request position */ 82 unsigned long empty_mark; /* mark of SDB not marked full */ 83 unsigned long *sdb_index; /* SDB address for fast lookup */ 84 unsigned long *sdbt_index; /* SDBT address for fast lookup */ 85 }; 86 87 struct cpu_hw_sf { 88 /* CPU-measurement sampling information block */ 89 struct hws_qsi_info_block qsi; 90 /* CPU-measurement sampling control block */ 91 struct hws_lsctl_request_block lsctl; 92 struct sf_buffer sfb; /* Sampling buffer */ 93 unsigned int flags; /* Status flags */ 94 struct perf_event *event; /* Scheduled perf event */ 95 struct perf_output_handle handle; /* AUX buffer output handle */ 96 }; 97 static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); 98 99 /* Debug feature */ 100 static debug_info_t *sfdbg; 101 102 /* 103 * sf_disable() - Switch off sampling facility 104 */ 105 static int sf_disable(void) 106 { 107 struct hws_lsctl_request_block sreq; 108 109 memset(&sreq, 0, sizeof(sreq)); 110 return lsctl(&sreq); 111 } 112 113 /* 114 * sf_buffer_available() - Check for an allocated sampling buffer 115 */ 116 static int sf_buffer_available(struct cpu_hw_sf *cpuhw) 117 { 118 return !!cpuhw->sfb.sdbt; 119 } 120 121 /* 122 * deallocate sampling facility buffer 123 */ 124 static void free_sampling_buffer(struct sf_buffer *sfb) 125 { 126 unsigned long *sdbt, *curr; 127 128 if (!sfb->sdbt) 129 return; 130 131 sdbt = sfb->sdbt; 132 curr = sdbt; 133 134 /* Free the SDBT after all SDBs are processed... 
*/ 135 while (1) { 136 if (!*curr || !sdbt) 137 break; 138 139 /* Process table-link entries */ 140 if (is_link_entry(curr)) { 141 curr = get_next_sdbt(curr); 142 if (sdbt) 143 free_page((unsigned long) sdbt); 144 145 /* If the origin is reached, sampling buffer is freed */ 146 if (curr == sfb->sdbt) 147 break; 148 else 149 sdbt = curr; 150 } else { 151 /* Process SDB pointer */ 152 if (*curr) { 153 free_page(*curr); 154 curr++; 155 } 156 } 157 } 158 159 debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, 160 (unsigned long)sfb->sdbt); 161 memset(sfb, 0, sizeof(*sfb)); 162 } 163 164 static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) 165 { 166 unsigned long sdb, *trailer; 167 168 /* Allocate and initialize sample-data-block */ 169 sdb = get_zeroed_page(gfp_flags); 170 if (!sdb) 171 return -ENOMEM; 172 trailer = trailer_entry_ptr(sdb); 173 *trailer = SDB_TE_ALERT_REQ_MASK; 174 175 /* Link SDB into the sample-data-block-table */ 176 *sdbt = sdb; 177 178 return 0; 179 } 180 181 /* 182 * realloc_sampling_buffer() - extend sampler memory 183 * 184 * Allocates new sample-data-blocks and adds them to the specified sampling 185 * buffer memory. 186 * 187 * Important: This modifies the sampling buffer and must be called when the 188 * sampling facility is disabled. 189 * 190 * Returns zero on success, non-zero otherwise. 191 */ 192 static int realloc_sampling_buffer(struct sf_buffer *sfb, 193 unsigned long num_sdb, gfp_t gfp_flags) 194 { 195 int i, rc; 196 unsigned long *new, *tail, *tail_prev = NULL; 197 198 if (!sfb->sdbt || !sfb->tail) 199 return -EINVAL; 200 201 if (!is_link_entry(sfb->tail)) 202 return -EINVAL; 203 204 /* Append to the existing sampling buffer, overwriting the table-link 205 * register. 206 * The tail variables always points to the "tail" (last and table-link) 207 * entry in an SDB-table. 208 */ 209 tail = sfb->tail; 210 211 /* Do a sanity check whether the table-link entry points to 212 * the sampling buffer origin. 213 */ 214 if (sfb->sdbt != get_next_sdbt(tail)) { 215 debug_sprintf_event(sfdbg, 3, "%s: " 216 "sampling buffer is not linked: origin %#lx" 217 " tail %#lx\n", __func__, 218 (unsigned long)sfb->sdbt, 219 (unsigned long)tail); 220 return -EINVAL; 221 } 222 223 /* Allocate remaining SDBs */ 224 rc = 0; 225 for (i = 0; i < num_sdb; i++) { 226 /* Allocate a new SDB-table if it is full. */ 227 if (require_table_link(tail)) { 228 new = (unsigned long *) get_zeroed_page(gfp_flags); 229 if (!new) { 230 rc = -ENOMEM; 231 break; 232 } 233 sfb->num_sdbt++; 234 /* Link current page to tail of chain */ 235 *tail = (unsigned long)(void *) new + 1; 236 tail_prev = tail; 237 tail = new; 238 } 239 240 /* Allocate a new sample-data-block. 241 * If there is not enough memory, stop the realloc process 242 * and simply use what was allocated. If this is a temporary 243 * issue, a new realloc call (if required) might succeed. 244 */ 245 rc = alloc_sample_data_block(tail, gfp_flags); 246 if (rc) { 247 /* Undo last SDBT. An SDBT with no SDB at its first 248 * entry but with an SDBT entry instead can not be 249 * handled by the interrupt handler code. 250 * Avoid this situation. 
251 */ 252 if (tail_prev) { 253 sfb->num_sdbt--; 254 free_page((unsigned long) new); 255 tail = tail_prev; 256 } 257 break; 258 } 259 sfb->num_sdb++; 260 tail++; 261 tail_prev = new = NULL; /* Allocated at least one SBD */ 262 } 263 264 /* Link sampling buffer to its origin */ 265 *tail = (unsigned long) sfb->sdbt + 1; 266 sfb->tail = tail; 267 268 debug_sprintf_event(sfdbg, 4, "%s: new buffer" 269 " settings: sdbt %lu sdb %lu\n", __func__, 270 sfb->num_sdbt, sfb->num_sdb); 271 return rc; 272 } 273 274 /* 275 * allocate_sampling_buffer() - allocate sampler memory 276 * 277 * Allocates and initializes a sampling buffer structure using the 278 * specified number of sample-data-blocks (SDB). For each allocation, 279 * a 4K page is used. The number of sample-data-block-tables (SDBT) 280 * are calculated from SDBs. 281 * Also set the ALERT_REQ mask in each SDBs trailer. 282 * 283 * Returns zero on success, non-zero otherwise. 284 */ 285 static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) 286 { 287 int rc; 288 289 if (sfb->sdbt) 290 return -EINVAL; 291 292 /* Allocate the sample-data-block-table origin */ 293 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 294 if (!sfb->sdbt) 295 return -ENOMEM; 296 sfb->num_sdb = 0; 297 sfb->num_sdbt = 1; 298 299 /* Link the table origin to point to itself to prepare for 300 * realloc_sampling_buffer() invocation. 301 */ 302 sfb->tail = sfb->sdbt; 303 *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; 304 305 /* Allocate requested number of sample-data-blocks */ 306 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); 307 if (rc) { 308 free_sampling_buffer(sfb); 309 debug_sprintf_event(sfdbg, 4, "%s: " 310 "realloc_sampling_buffer failed with rc %i\n", 311 __func__, rc); 312 } else 313 debug_sprintf_event(sfdbg, 4, 314 "%s: tear %#lx dear %#lx\n", __func__, 315 (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); 316 return rc; 317 } 318 319 static void sfb_set_limits(unsigned long min, unsigned long max) 320 { 321 struct hws_qsi_info_block si; 322 323 CPUM_SF_MIN_SDB = min; 324 CPUM_SF_MAX_SDB = max; 325 326 memset(&si, 0, sizeof(si)); 327 if (!qsi(&si)) 328 CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); 329 } 330 331 static unsigned long sfb_max_limit(struct hw_perf_event *hwc) 332 { 333 return SAMPL_DIAG_MODE(hwc) ? 
CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR 334 : CPUM_SF_MAX_SDB; 335 } 336 337 static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, 338 struct hw_perf_event *hwc) 339 { 340 if (!sfb->sdbt) 341 return SFB_ALLOC_REG(hwc); 342 if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) 343 return SFB_ALLOC_REG(hwc) - sfb->num_sdb; 344 return 0; 345 } 346 347 static int sfb_has_pending_allocs(struct sf_buffer *sfb, 348 struct hw_perf_event *hwc) 349 { 350 return sfb_pending_allocs(sfb, hwc) > 0; 351 } 352 353 static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) 354 { 355 /* Limit the number of SDBs to not exceed the maximum */ 356 num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); 357 if (num) 358 SFB_ALLOC_REG(hwc) += num; 359 } 360 361 static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) 362 { 363 SFB_ALLOC_REG(hwc) = 0; 364 sfb_account_allocs(num, hwc); 365 } 366 367 static void deallocate_buffers(struct cpu_hw_sf *cpuhw) 368 { 369 if (cpuhw->sfb.sdbt) 370 free_sampling_buffer(&cpuhw->sfb); 371 } 372 373 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) 374 { 375 unsigned long n_sdb, freq, factor; 376 size_t sample_size; 377 378 /* Calculate sampling buffers using 4K pages 379 * 380 * 1. Determine the sample data size which depends on the used 381 * sampling functions, for example, basic-sampling or 382 * basic-sampling with diagnostic-sampling. 383 * 384 * 2. Use the sampling frequency as input. The sampling buffer is 385 * designed for almost one second. This can be adjusted through 386 * the "factor" variable. 387 * In any case, alloc_sampling_buffer() sets the Alert Request 388 * Control indicator to trigger a measurement-alert to harvest 389 * sample-data-blocks (sdb). 390 * 391 * 3. Compute the number of sample-data-blocks and ensure a minimum 392 * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not 393 * exceed a "calculated" maximum. The symbolic maximum is 394 * designed for basic-sampling only and needs to be increased if 395 * diagnostic-sampling is active. 396 * See also the remarks for these symbolic constants. 397 * 398 * 4. Compute the number of sample-data-block-tables (SDBT) and 399 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up 400 * to 511 SDBs). 401 */ 402 sample_size = sizeof(struct hws_basic_entry); 403 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); 404 factor = 1; 405 n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); 406 if (n_sdb < CPUM_SF_MIN_SDB) 407 n_sdb = CPUM_SF_MIN_SDB; 408 409 /* If there is already a sampling buffer allocated, it is very likely 410 * that the sampling facility is enabled too. If the event to be 411 * initialized requires a greater sampling buffer, the allocation must 412 * be postponed. Changing the sampling buffer requires the sampling 413 * facility to be in the disabled state. So, account the number of 414 * required SDBs and let cpumsf_pmu_enable() resize the buffer just 415 * before the event is started. 
416 */ 417 sfb_init_allocs(n_sdb, hwc); 418 if (sf_buffer_available(cpuhw)) 419 return 0; 420 421 debug_sprintf_event(sfdbg, 3, 422 "%s: rate %lu f %lu sdb %lu/%lu" 423 " sample_size %lu cpuhw %p\n", __func__, 424 SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), 425 sample_size, cpuhw); 426 427 return alloc_sampling_buffer(&cpuhw->sfb, 428 sfb_pending_allocs(&cpuhw->sfb, hwc)); 429 } 430 431 static unsigned long min_percent(unsigned int percent, unsigned long base, 432 unsigned long min) 433 { 434 return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); 435 } 436 437 static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) 438 { 439 /* Use a percentage-based approach to extend the sampling facility 440 * buffer. Accept up to 5% sample data loss. 441 * Vary the extents between 1% to 5% of the current number of 442 * sample-data-blocks. 443 */ 444 if (ratio <= 5) 445 return 0; 446 if (ratio <= 25) 447 return min_percent(1, base, 1); 448 if (ratio <= 50) 449 return min_percent(1, base, 1); 450 if (ratio <= 75) 451 return min_percent(2, base, 2); 452 if (ratio <= 100) 453 return min_percent(3, base, 3); 454 if (ratio <= 250) 455 return min_percent(4, base, 4); 456 457 return min_percent(5, base, 8); 458 } 459 460 static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, 461 struct hw_perf_event *hwc) 462 { 463 unsigned long ratio, num; 464 465 if (!OVERFLOW_REG(hwc)) 466 return; 467 468 /* The sample_overflow contains the average number of sample data 469 * that has been lost because sample-data-blocks were full. 470 * 471 * Calculate the total number of sample data entries that has been 472 * discarded. Then calculate the ratio of lost samples to total samples 473 * per second in percent. 474 */ 475 ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, 476 sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); 477 478 /* Compute number of sample-data-blocks */ 479 num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); 480 if (num) 481 sfb_account_allocs(num, hwc); 482 483 debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", 484 __func__, OVERFLOW_REG(hwc), ratio, num); 485 OVERFLOW_REG(hwc) = 0; 486 } 487 488 /* extend_sampling_buffer() - Extend sampling buffer 489 * @sfb: Sampling buffer structure (for local CPU) 490 * @hwc: Perf event hardware structure 491 * 492 * Use this function to extend the sampling buffer based on the overflow counter 493 * and postponed allocation extents stored in the specified Perf event hardware. 494 * 495 * Important: This function disables the sampling facility in order to safely 496 * change the sampling buffer structure. Do not call this function 497 * when the PMU is active. 498 */ 499 static void extend_sampling_buffer(struct sf_buffer *sfb, 500 struct hw_perf_event *hwc) 501 { 502 unsigned long num, num_old; 503 int rc; 504 505 num = sfb_pending_allocs(sfb, hwc); 506 if (!num) 507 return; 508 num_old = sfb->num_sdb; 509 510 /* Disable the sampling facility to reset any states and also 511 * clear pending measurement alerts. 512 */ 513 sf_disable(); 514 515 /* Extend the sampling buffer. 516 * This memory allocation typically happens in an atomic context when 517 * called by perf. Because this is a reallocation, it is fine if the 518 * new SDB-request cannot be satisfied immediately. 
519 */ 520 rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); 521 if (rc) 522 debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n", 523 __func__, rc); 524 525 if (sfb_has_pending_allocs(sfb, hwc)) 526 debug_sprintf_event(sfdbg, 5, "%s: " 527 "req %lu alloc %lu remaining %lu\n", 528 __func__, num, sfb->num_sdb - num_old, 529 sfb_pending_allocs(sfb, hwc)); 530 } 531 532 /* Number of perf events counting hardware events */ 533 static atomic_t num_events; 534 /* Used to avoid races in calling reserve/release_cpumf_hardware */ 535 static DEFINE_MUTEX(pmc_reserve_mutex); 536 537 #define PMC_INIT 0 538 #define PMC_RELEASE 1 539 #define PMC_FAILURE 2 540 static void setup_pmc_cpu(void *flags) 541 { 542 int err; 543 struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); 544 545 err = 0; 546 switch (*((int *) flags)) { 547 case PMC_INIT: 548 memset(cpusf, 0, sizeof(*cpusf)); 549 err = qsi(&cpusf->qsi); 550 if (err) 551 break; 552 cpusf->flags |= PMU_F_RESERVED; 553 err = sf_disable(); 554 if (err) 555 pr_err("Switching off the sampling facility failed " 556 "with rc %i\n", err); 557 debug_sprintf_event(sfdbg, 5, 558 "%s: initialized: cpuhw %p\n", __func__, 559 cpusf); 560 break; 561 case PMC_RELEASE: 562 cpusf->flags &= ~PMU_F_RESERVED; 563 err = sf_disable(); 564 if (err) { 565 pr_err("Switching off the sampling facility failed " 566 "with rc %i\n", err); 567 } else 568 deallocate_buffers(cpusf); 569 debug_sprintf_event(sfdbg, 5, 570 "%s: released: cpuhw %p\n", __func__, 571 cpusf); 572 break; 573 } 574 if (err) 575 *((int *) flags) |= PMC_FAILURE; 576 } 577 578 static void release_pmc_hardware(void) 579 { 580 int flags = PMC_RELEASE; 581 582 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 583 on_each_cpu(setup_pmc_cpu, &flags, 1); 584 } 585 586 static int reserve_pmc_hardware(void) 587 { 588 int flags = PMC_INIT; 589 590 on_each_cpu(setup_pmc_cpu, &flags, 1); 591 if (flags & PMC_FAILURE) { 592 release_pmc_hardware(); 593 return -ENODEV; 594 } 595 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 596 597 return 0; 598 } 599 600 static void hw_perf_event_destroy(struct perf_event *event) 601 { 602 /* Release PMC if this is the last perf event */ 603 if (!atomic_add_unless(&num_events, -1, 1)) { 604 mutex_lock(&pmc_reserve_mutex); 605 if (atomic_dec_return(&num_events) == 0) 606 release_pmc_hardware(); 607 mutex_unlock(&pmc_reserve_mutex); 608 } 609 } 610 611 static void hw_init_period(struct hw_perf_event *hwc, u64 period) 612 { 613 hwc->sample_period = period; 614 hwc->last_period = hwc->sample_period; 615 local64_set(&hwc->period_left, hwc->sample_period); 616 } 617 618 static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, 619 unsigned long rate) 620 { 621 return clamp_t(unsigned long, rate, 622 si->min_sampl_rate, si->max_sampl_rate); 623 } 624 625 static u32 cpumsf_pid_type(struct perf_event *event, 626 u32 pid, enum pid_type type) 627 { 628 struct task_struct *tsk; 629 630 /* Idle process */ 631 if (!pid) 632 goto out; 633 634 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 635 pid = -1; 636 if (tsk) { 637 /* 638 * Only top level events contain the pid namespace in which 639 * they are created. 640 */ 641 if (event->parent) 642 event = event->parent; 643 pid = __task_pid_nr_ns(tsk, type, event->ns); 644 /* 645 * See also 1d953111b648 646 * "perf/core: Don't report zero PIDs for exiting tasks". 
647 */ 648 if (!pid && !pid_alive(tsk)) 649 pid = -1; 650 } 651 out: 652 return pid; 653 } 654 655 static void cpumsf_output_event_pid(struct perf_event *event, 656 struct perf_sample_data *data, 657 struct pt_regs *regs) 658 { 659 u32 pid; 660 struct perf_event_header header; 661 struct perf_output_handle handle; 662 663 /* 664 * Obtain the PID from the basic-sampling data entry and 665 * correct the data->tid_entry.pid value. 666 */ 667 pid = data->tid_entry.pid; 668 669 /* Protect callchain buffers, tasks */ 670 rcu_read_lock(); 671 672 perf_prepare_sample(&header, data, event, regs); 673 if (perf_output_begin(&handle, event, header.size)) 674 goto out; 675 676 /* Update the process ID (see also kernel/events/core.c) */ 677 data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID); 678 data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID); 679 680 perf_output_sample(&handle, &header, data, event); 681 perf_output_end(&handle); 682 out: 683 rcu_read_unlock(); 684 } 685 686 static unsigned long getrate(bool freq, unsigned long sample, 687 struct hws_qsi_info_block *si) 688 { 689 unsigned long rate; 690 691 if (freq) { 692 rate = freq_to_sample_rate(si, sample); 693 rate = hw_limit_rate(si, rate); 694 } else { 695 /* The min/max sampling rates specifies the valid range 696 * of sample periods. If the specified sample period is 697 * out of range, limit the period to the range boundary. 698 */ 699 rate = hw_limit_rate(si, sample); 700 701 /* The perf core maintains a maximum sample rate that is 702 * configurable through the sysctl interface. Ensure the 703 * sampling rate does not exceed this value. This also helps 704 * to avoid throttling when pushing samples with 705 * perf_event_overflow(). 706 */ 707 if (sample_rate_to_freq(si, rate) > 708 sysctl_perf_event_sample_rate) { 709 debug_sprintf_event(sfdbg, 1, "%s: " 710 "Sampling rate exceeds maximum " 711 "perf sample rate\n", __func__); 712 rate = 0; 713 } 714 } 715 return rate; 716 } 717 718 /* The sampling information (si) contains information about the 719 * min/max sampling intervals and the CPU speed. So calculate the 720 * correct sampling interval and avoid the whole period adjust 721 * feedback loop. 722 * 723 * Since the CPU Measurement sampling facility can not handle frequency 724 * calculate the sampling interval when frequency is specified using 725 * this formula: 726 * interval := cpu_speed * 1000000 / sample_freq 727 * 728 * Returns errno on bad input and zero on success with parameter interval 729 * set to the correct sampling rate. 730 * 731 * Note: This function turns off freq bit to avoid calling function 732 * perf_adjust_period(). This causes frequency adjustment in the common 733 * code part which causes tremendous variations in the counter values. 
734 */ 735 static int __hw_perf_event_init_rate(struct perf_event *event, 736 struct hws_qsi_info_block *si) 737 { 738 struct perf_event_attr *attr = &event->attr; 739 struct hw_perf_event *hwc = &event->hw; 740 unsigned long rate; 741 742 if (attr->freq) { 743 if (!attr->sample_freq) 744 return -EINVAL; 745 rate = getrate(attr->freq, attr->sample_freq, si); 746 attr->freq = 0; /* Don't call perf_adjust_period() */ 747 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE; 748 } else { 749 rate = getrate(attr->freq, attr->sample_period, si); 750 if (!rate) 751 return -EINVAL; 752 } 753 attr->sample_period = rate; 754 SAMPL_RATE(hwc) = rate; 755 hw_init_period(hwc, SAMPL_RATE(hwc)); 756 debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", 757 __func__, event->cpu, event->attr.sample_period, 758 event->attr.freq, SAMPLE_FREQ_MODE(hwc)); 759 return 0; 760 } 761 762 static int __hw_perf_event_init(struct perf_event *event) 763 { 764 struct cpu_hw_sf *cpuhw; 765 struct hws_qsi_info_block si; 766 struct perf_event_attr *attr = &event->attr; 767 struct hw_perf_event *hwc = &event->hw; 768 int cpu, err; 769 770 /* Reserve CPU-measurement sampling facility */ 771 err = 0; 772 if (!atomic_inc_not_zero(&num_events)) { 773 mutex_lock(&pmc_reserve_mutex); 774 if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) 775 err = -EBUSY; 776 else 777 atomic_inc(&num_events); 778 mutex_unlock(&pmc_reserve_mutex); 779 } 780 event->destroy = hw_perf_event_destroy; 781 782 if (err) 783 goto out; 784 785 /* Access per-CPU sampling information (query sampling info) */ 786 /* 787 * The event->cpu value can be -1 to count on every CPU, for example, 788 * when attaching to a task. If this is specified, use the query 789 * sampling info from the current CPU, otherwise use event->cpu to 790 * retrieve the per-CPU information. 791 * Later, cpuhw indicates whether to allocate sampling buffers for a 792 * particular CPU (cpuhw!=NULL) or each online CPU (cpuw==NULL). 793 */ 794 memset(&si, 0, sizeof(si)); 795 cpuhw = NULL; 796 if (event->cpu == -1) 797 qsi(&si); 798 else { 799 /* Event is pinned to a particular CPU, retrieve the per-CPU 800 * sampling structure for accessing the CPU-specific QSI. 801 */ 802 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 803 si = cpuhw->qsi; 804 } 805 806 /* Check sampling facility authorization and, if not authorized, 807 * fall back to other PMUs. It is safe to check any CPU because 808 * the authorization is identical for all configured CPUs. 809 */ 810 if (!si.as) { 811 err = -ENOENT; 812 goto out; 813 } 814 815 if (si.ribm & CPU_MF_SF_RIBM_NOTAV) { 816 pr_warn("CPU Measurement Facility sampling is temporarily not available\n"); 817 err = -EBUSY; 818 goto out; 819 } 820 821 /* Always enable basic sampling */ 822 SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; 823 824 /* Check if diagnostic sampling is requested. Deny if the required 825 * sampling authorization is missing. 826 */ 827 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { 828 if (!si.ad) { 829 err = -EPERM; 830 goto out; 831 } 832 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; 833 } 834 835 /* Check and set other sampling flags */ 836 if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) 837 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; 838 839 err = __hw_perf_event_init_rate(event, &si); 840 if (err) 841 goto out; 842 843 /* Initialize sample data overflow accounting */ 844 hwc->extra_reg.reg = REG_OVERFLOW; 845 OVERFLOW_REG(hwc) = 0; 846 847 /* Use AUX buffer. 
No need to allocate it by ourself */ 848 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) 849 return 0; 850 851 /* Allocate the per-CPU sampling buffer using the CPU information 852 * from the event. If the event is not pinned to a particular 853 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling 854 * buffers for each online CPU. 855 */ 856 if (cpuhw) 857 /* Event is pinned to a particular CPU */ 858 err = allocate_buffers(cpuhw, hwc); 859 else { 860 /* Event is not pinned, allocate sampling buffer on 861 * each online CPU 862 */ 863 for_each_online_cpu(cpu) { 864 cpuhw = &per_cpu(cpu_hw_sf, cpu); 865 err = allocate_buffers(cpuhw, hwc); 866 if (err) 867 break; 868 } 869 } 870 871 /* If PID/TID sampling is active, replace the default overflow 872 * handler to extract and resolve the PIDs from the basic-sampling 873 * data entries. 874 */ 875 if (event->attr.sample_type & PERF_SAMPLE_TID) 876 if (is_default_overflow_handler(event)) 877 event->overflow_handler = cpumsf_output_event_pid; 878 out: 879 return err; 880 } 881 882 static int cpumsf_pmu_event_init(struct perf_event *event) 883 { 884 int err; 885 886 /* No support for taken branch sampling */ 887 if (has_branch_stack(event)) 888 return -EOPNOTSUPP; 889 890 switch (event->attr.type) { 891 case PERF_TYPE_RAW: 892 if ((event->attr.config != PERF_EVENT_CPUM_SF) && 893 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) 894 return -ENOENT; 895 break; 896 case PERF_TYPE_HARDWARE: 897 /* Support sampling of CPU cycles in addition to the 898 * counter facility. However, the counter facility 899 * is more precise and, hence, restrict this PMU to 900 * sampling events only. 901 */ 902 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) 903 return -ENOENT; 904 if (!is_sampling_event(event)) 905 return -ENOENT; 906 break; 907 default: 908 return -ENOENT; 909 } 910 911 /* Check online status of the CPU to which the event is pinned */ 912 if (event->cpu >= 0 && !cpu_online(event->cpu)) 913 return -ENODEV; 914 915 /* Force reset of idle/hv excludes regardless of what the 916 * user requested. 917 */ 918 if (event->attr.exclude_hv) 919 event->attr.exclude_hv = 0; 920 if (event->attr.exclude_idle) 921 event->attr.exclude_idle = 0; 922 923 err = __hw_perf_event_init(event); 924 if (unlikely(err)) 925 if (event->destroy) 926 event->destroy(event); 927 return err; 928 } 929 930 static void cpumsf_pmu_enable(struct pmu *pmu) 931 { 932 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 933 struct hw_perf_event *hwc; 934 int err; 935 936 if (cpuhw->flags & PMU_F_ENABLED) 937 return; 938 939 if (cpuhw->flags & PMU_F_ERR_MASK) 940 return; 941 942 /* Check whether to extent the sampling buffer. 943 * 944 * Two conditions trigger an increase of the sampling buffer for a 945 * perf event: 946 * 1. Postponed buffer allocations from the event initialization. 947 * 2. Sampling overflows that contribute to pending allocations. 948 * 949 * Note that the extend_sampling_buffer() function disables the sampling 950 * facility, but it can be fully re-enabled using sampling controls that 951 * have been saved in cpumsf_pmu_disable(). 
952 */ 953 if (cpuhw->event) { 954 hwc = &cpuhw->event->hw; 955 if (!(SAMPL_DIAG_MODE(hwc))) { 956 /* 957 * Account number of overflow-designated 958 * buffer extents 959 */ 960 sfb_account_overflows(cpuhw, hwc); 961 extend_sampling_buffer(&cpuhw->sfb, hwc); 962 } 963 /* Rate may be adjusted with ioctl() */ 964 cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); 965 } 966 967 /* (Re)enable the PMU and sampling facility */ 968 cpuhw->flags |= PMU_F_ENABLED; 969 barrier(); 970 971 err = lsctl(&cpuhw->lsctl); 972 if (err) { 973 cpuhw->flags &= ~PMU_F_ENABLED; 974 pr_err("Loading sampling controls failed: op %i err %i\n", 975 1, err); 976 return; 977 } 978 979 /* Load current program parameter */ 980 lpp(&S390_lowcore.lpp); 981 982 debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " 983 "interval %#lx tear %#lx dear %#lx\n", __func__, 984 cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, 985 cpuhw->lsctl.cd, cpuhw->lsctl.interval, 986 cpuhw->lsctl.tear, cpuhw->lsctl.dear); 987 } 988 989 static void cpumsf_pmu_disable(struct pmu *pmu) 990 { 991 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 992 struct hws_lsctl_request_block inactive; 993 struct hws_qsi_info_block si; 994 int err; 995 996 if (!(cpuhw->flags & PMU_F_ENABLED)) 997 return; 998 999 if (cpuhw->flags & PMU_F_ERR_MASK) 1000 return; 1001 1002 /* Switch off sampling activation control */ 1003 inactive = cpuhw->lsctl; 1004 inactive.cs = 0; 1005 inactive.cd = 0; 1006 1007 err = lsctl(&inactive); 1008 if (err) { 1009 pr_err("Loading sampling controls failed: op %i err %i\n", 1010 2, err); 1011 return; 1012 } 1013 1014 /* Save state of TEAR and DEAR register contents */ 1015 err = qsi(&si); 1016 if (!err) { 1017 /* TEAR/DEAR values are valid only if the sampling facility is 1018 * enabled. Note that cpumsf_pmu_disable() might be called even 1019 * for a disabled sampling facility because cpumsf_pmu_enable() 1020 * controls the enable/disable state. 1021 */ 1022 if (si.es) { 1023 cpuhw->lsctl.tear = si.tear; 1024 cpuhw->lsctl.dear = si.dear; 1025 } 1026 } else 1027 debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", 1028 __func__, err); 1029 1030 cpuhw->flags &= ~PMU_F_ENABLED; 1031 } 1032 1033 /* perf_exclude_event() - Filter event 1034 * @event: The perf event 1035 * @regs: pt_regs structure 1036 * @sde_regs: Sample-data-entry (sde) regs structure 1037 * 1038 * Filter perf events according to their exclude specification. 1039 * 1040 * Return non-zero if the event shall be excluded. 1041 */ 1042 static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, 1043 struct perf_sf_sde_regs *sde_regs) 1044 { 1045 if (event->attr.exclude_user && user_mode(regs)) 1046 return 1; 1047 if (event->attr.exclude_kernel && !user_mode(regs)) 1048 return 1; 1049 if (event->attr.exclude_guest && sde_regs->in_guest) 1050 return 1; 1051 if (event->attr.exclude_host && !sde_regs->in_guest) 1052 return 1; 1053 return 0; 1054 } 1055 1056 /* perf_push_sample() - Push samples to perf 1057 * @event: The perf event 1058 * @sample: Hardware sample data 1059 * 1060 * Use the hardware sample data to create perf event sample. The sample 1061 * is the pushed to the event subsystem and the function checks for 1062 * possible event overflows. If an event overflow occurs, the PMU is 1063 * stopped. 1064 * 1065 * Return non-zero if an event overflow occurred. 
1066 */ 1067 static int perf_push_sample(struct perf_event *event, 1068 struct hws_basic_entry *basic) 1069 { 1070 int overflow; 1071 struct pt_regs regs; 1072 struct perf_sf_sde_regs *sde_regs; 1073 struct perf_sample_data data; 1074 1075 /* Setup perf sample */ 1076 perf_sample_data_init(&data, 0, event->hw.last_period); 1077 1078 /* Setup pt_regs to look like an CPU-measurement external interrupt 1079 * using the Program Request Alert code. The regs.int_parm_long 1080 * field which is unused contains additional sample-data-entry related 1081 * indicators. 1082 */ 1083 memset(®s, 0, sizeof(regs)); 1084 regs.int_code = 0x1407; 1085 regs.int_parm = CPU_MF_INT_SF_PRA; 1086 sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; 1087 1088 psw_bits(regs.psw).ia = basic->ia; 1089 psw_bits(regs.psw).dat = basic->T; 1090 psw_bits(regs.psw).wait = basic->W; 1091 psw_bits(regs.psw).pstate = basic->P; 1092 psw_bits(regs.psw).as = basic->AS; 1093 1094 /* 1095 * Use the hardware provided configuration level to decide if the 1096 * sample belongs to a guest or host. If that is not available, 1097 * fall back to the following heuristics: 1098 * A non-zero guest program parameter always indicates a guest 1099 * sample. Some early samples or samples from guests without 1100 * lpp usage would be misaccounted to the host. We use the asn 1101 * value as an addon heuristic to detect most of these guest samples. 1102 * If the value differs from 0xffff (the host value), we assume to 1103 * be a KVM guest. 1104 */ 1105 switch (basic->CL) { 1106 case 1: /* logical partition */ 1107 sde_regs->in_guest = 0; 1108 break; 1109 case 2: /* virtual machine */ 1110 sde_regs->in_guest = 1; 1111 break; 1112 default: /* old machine, use heuristics */ 1113 if (basic->gpp || basic->prim_asn != 0xffff) 1114 sde_regs->in_guest = 1; 1115 break; 1116 } 1117 1118 /* 1119 * Store the PID value from the sample-data-entry to be 1120 * processed and resolved by cpumsf_output_event_pid(). 1121 */ 1122 data.tid_entry.pid = basic->hpp & LPP_PID_MASK; 1123 1124 overflow = 0; 1125 if (perf_exclude_event(event, ®s, sde_regs)) 1126 goto out; 1127 if (perf_event_overflow(event, &data, ®s)) { 1128 overflow = 1; 1129 event->pmu->stop(event, 0); 1130 } 1131 perf_event_update_userpage(event); 1132 out: 1133 return overflow; 1134 } 1135 1136 static void perf_event_count_update(struct perf_event *event, u64 count) 1137 { 1138 local64_add(count, &event->count); 1139 } 1140 1141 /* hw_collect_samples() - Walk through a sample-data-block and collect samples 1142 * @event: The perf event 1143 * @sdbt: Sample-data-block table 1144 * @overflow: Event overflow counter 1145 * 1146 * Walks through a sample-data-block and collects sampling data entries that are 1147 * then pushed to the perf event subsystem. Depending on the sampling function, 1148 * there can be either basic-sampling or combined-sampling data entries. A 1149 * combined-sampling data entry consists of a basic- and a diagnostic-sampling 1150 * data entry. The sampling function is determined by the flags in the perf 1151 * event hardware structure. The function always works with a combined-sampling 1152 * data entry but ignores the the diagnostic portion if it is not available. 1153 * 1154 * Note that the implementation focuses on basic-sampling data entries and, if 1155 * such an entry is not valid, the entire combined-sampling data entry is 1156 * ignored. 1157 * 1158 * The overflow variables counts the number of samples that has been discarded 1159 * due to a perf event overflow. 
1160 */ 1161 static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, 1162 unsigned long long *overflow) 1163 { 1164 struct hws_trailer_entry *te; 1165 struct hws_basic_entry *sample; 1166 1167 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); 1168 sample = (struct hws_basic_entry *) *sdbt; 1169 while ((unsigned long *) sample < (unsigned long *) te) { 1170 /* Check for an empty sample */ 1171 if (!sample->def) 1172 break; 1173 1174 /* Update perf event period */ 1175 perf_event_count_update(event, SAMPL_RATE(&event->hw)); 1176 1177 /* Check whether sample is valid */ 1178 if (sample->def == 0x0001) { 1179 /* If an event overflow occurred, the PMU is stopped to 1180 * throttle event delivery. Remaining sample data is 1181 * discarded. 1182 */ 1183 if (!*overflow) { 1184 /* Check whether sample is consistent */ 1185 if (sample->I == 0 && sample->W == 0) { 1186 /* Deliver sample data to perf */ 1187 *overflow = perf_push_sample(event, 1188 sample); 1189 } 1190 } else 1191 /* Count discarded samples */ 1192 *overflow += 1; 1193 } else { 1194 debug_sprintf_event(sfdbg, 4, 1195 "%s: Found unknown" 1196 " sampling data entry: te->f %i" 1197 " basic.def %#4x (%p)\n", __func__, 1198 te->f, sample->def, sample); 1199 /* Sample slot is not yet written or other record. 1200 * 1201 * This condition can occur if the buffer was reused 1202 * from a combined basic- and diagnostic-sampling. 1203 * If only basic-sampling is then active, entries are 1204 * written into the larger diagnostic entries. 1205 * This is typically the case for sample-data-blocks 1206 * that are not full. Stop processing if the first 1207 * invalid format was detected. 1208 */ 1209 if (!te->f) 1210 break; 1211 } 1212 1213 /* Reset sample slot and advance to next sample */ 1214 sample->def = 0; 1215 sample++; 1216 } 1217 } 1218 1219 /* hw_perf_event_update() - Process sampling buffer 1220 * @event: The perf event 1221 * @flush_all: Flag to also flush partially filled sample-data-blocks 1222 * 1223 * Processes the sampling buffer and create perf event samples. 1224 * The sampling buffer position are retrieved and saved in the TEAR_REG 1225 * register of the specified perf event. 1226 * 1227 * Only full sample-data-blocks are processed. Specify the flash_all flag 1228 * to also walk through partially filled sample-data-blocks. It is ignored 1229 * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag 1230 * enforces the processing of full sample-data-blocks only (trailer entries 1231 * with the block-full-indicator bit set). 1232 */ 1233 static void hw_perf_event_update(struct perf_event *event, int flush_all) 1234 { 1235 struct hw_perf_event *hwc = &event->hw; 1236 struct hws_trailer_entry *te; 1237 unsigned long *sdbt; 1238 unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; 1239 int done; 1240 1241 /* 1242 * AUX buffer is used when in diagnostic sampling mode. 1243 * No perf events/samples are created. 
1244 */ 1245 if (SAMPL_DIAG_MODE(&event->hw)) 1246 return; 1247 1248 if (flush_all && SDB_FULL_BLOCKS(hwc)) 1249 flush_all = 0; 1250 1251 sdbt = (unsigned long *) TEAR_REG(hwc); 1252 done = event_overflow = sampl_overflow = num_sdb = 0; 1253 while (!done) { 1254 /* Get the trailer entry of the sample-data-block */ 1255 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); 1256 1257 /* Leave loop if no more work to do (block full indicator) */ 1258 if (!te->f) { 1259 done = 1; 1260 if (!flush_all) 1261 break; 1262 } 1263 1264 /* Check the sample overflow count */ 1265 if (te->overflow) 1266 /* Account sample overflows and, if a particular limit 1267 * is reached, extend the sampling buffer. 1268 * For details, see sfb_account_overflows(). 1269 */ 1270 sampl_overflow += te->overflow; 1271 1272 /* Timestamps are valid for full sample-data-blocks only */ 1273 debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " 1274 "overflow %llu timestamp %#llx\n", 1275 __func__, (unsigned long)sdbt, te->overflow, 1276 (te->f) ? trailer_timestamp(te) : 0ULL); 1277 1278 /* Collect all samples from a single sample-data-block and 1279 * flag if an (perf) event overflow happened. If so, the PMU 1280 * is stopped and remaining samples will be discarded. 1281 */ 1282 hw_collect_samples(event, sdbt, &event_overflow); 1283 num_sdb++; 1284 1285 /* Reset trailer (using compare-double-and-swap) */ 1286 do { 1287 te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; 1288 te_flags |= SDB_TE_ALERT_REQ_MASK; 1289 } while (!cmpxchg_double(&te->flags, &te->overflow, 1290 te->flags, te->overflow, 1291 te_flags, 0ULL)); 1292 1293 /* Advance to next sample-data-block */ 1294 sdbt++; 1295 if (is_link_entry(sdbt)) 1296 sdbt = get_next_sdbt(sdbt); 1297 1298 /* Update event hardware registers */ 1299 TEAR_REG(hwc) = (unsigned long) sdbt; 1300 1301 /* Stop processing sample-data if all samples of the current 1302 * sample-data-block were flushed even if it was not full. 1303 */ 1304 if (flush_all && done) 1305 break; 1306 } 1307 1308 /* Account sample overflows in the event hardware structure */ 1309 if (sampl_overflow) 1310 OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + 1311 sampl_overflow, 1 + num_sdb); 1312 1313 /* Perf_event_overflow() and perf_event_account_interrupt() limit 1314 * the interrupt rate to an upper limit. Roughly 1000 samples per 1315 * task tick. 1316 * Hitting this limit results in a large number 1317 * of throttled REF_REPORT_THROTTLE entries and the samples 1318 * are dropped. 1319 * Slightly increase the interval to avoid hitting this limit. 1320 */ 1321 if (event_overflow) { 1322 SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); 1323 debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", 1324 __func__, 1325 DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); 1326 } 1327 1328 if (sampl_overflow || event_overflow) 1329 debug_sprintf_event(sfdbg, 4, "%s: " 1330 "overflows: sample %llu event %llu" 1331 " total %llu num_sdb %llu\n", 1332 __func__, sampl_overflow, event_overflow, 1333 OVERFLOW_REG(hwc), num_sdb); 1334 } 1335 1336 #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb) 1337 #define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0) 1338 #define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark) 1339 #define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark) 1340 1341 /* 1342 * Get trailer entry by index of SDB. 
1343 */ 1344 static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux, 1345 unsigned long index) 1346 { 1347 unsigned long sdb; 1348 1349 index = AUX_SDB_INDEX(aux, index); 1350 sdb = aux->sdb_index[index]; 1351 return (struct hws_trailer_entry *)trailer_entry_ptr(sdb); 1352 } 1353 1354 /* 1355 * Finish sampling on the cpu. Called by cpumsf_pmu_del() with pmu 1356 * disabled. Collect the full SDBs in AUX buffer which have not reached 1357 * the point of alert indicator. And ignore the SDBs which are not 1358 * full. 1359 * 1360 * 1. Scan SDBs to see how much data is there and consume them. 1361 * 2. Remove alert indicator in the buffer. 1362 */ 1363 static void aux_output_end(struct perf_output_handle *handle) 1364 { 1365 unsigned long i, range_scan, idx; 1366 struct aux_buffer *aux; 1367 struct hws_trailer_entry *te; 1368 1369 aux = perf_get_aux(handle); 1370 if (!aux) 1371 return; 1372 1373 range_scan = AUX_SDB_NUM_ALERT(aux); 1374 for (i = 0, idx = aux->head; i < range_scan; i++, idx++) { 1375 te = aux_sdb_trailer(aux, idx); 1376 if (!(te->flags & SDB_TE_BUFFER_FULL_MASK)) 1377 break; 1378 } 1379 /* i is num of SDBs which are full */ 1380 perf_aux_output_end(handle, i << PAGE_SHIFT); 1381 1382 /* Remove alert indicators in the buffer */ 1383 te = aux_sdb_trailer(aux, aux->alert_mark); 1384 te->flags &= ~SDB_TE_ALERT_REQ_MASK; 1385 1386 debug_sprintf_event(sfdbg, 6, "%s: collect %#lx SDBs\n", __func__, i); 1387 } 1388 1389 /* 1390 * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event 1391 * is first added to the CPU or rescheduled again to the CPU. It is called 1392 * with pmu disabled. 1393 * 1394 * 1. Reset the trailer of SDBs to get ready for new data. 1395 * 2. Tell the hardware where to put the data by reset the SDBs buffer 1396 * head(tear/dear). 1397 */ 1398 static int aux_output_begin(struct perf_output_handle *handle, 1399 struct aux_buffer *aux, 1400 struct cpu_hw_sf *cpuhw) 1401 { 1402 unsigned long range; 1403 unsigned long i, range_scan, idx; 1404 unsigned long head, base, offset; 1405 struct hws_trailer_entry *te; 1406 1407 if (WARN_ON_ONCE(handle->head & ~PAGE_MASK)) 1408 return -EINVAL; 1409 1410 aux->head = handle->head >> PAGE_SHIFT; 1411 range = (handle->size + 1) >> PAGE_SHIFT; 1412 if (range <= 1) 1413 return -ENOMEM; 1414 1415 /* 1416 * SDBs between aux->head and aux->empty_mark are already ready 1417 * for new data. range_scan is num of SDBs not within them. 
1418 */ 1419 if (range > AUX_SDB_NUM_EMPTY(aux)) { 1420 range_scan = range - AUX_SDB_NUM_EMPTY(aux); 1421 idx = aux->empty_mark + 1; 1422 for (i = 0; i < range_scan; i++, idx++) { 1423 te = aux_sdb_trailer(aux, idx); 1424 te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; 1425 te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK; 1426 te->overflow = 0; 1427 } 1428 /* Save the position of empty SDBs */ 1429 aux->empty_mark = aux->head + range - 1; 1430 } 1431 1432 /* Set alert indicator */ 1433 aux->alert_mark = aux->head + range/2 - 1; 1434 te = aux_sdb_trailer(aux, aux->alert_mark); 1435 te->flags = te->flags | SDB_TE_ALERT_REQ_MASK; 1436 1437 /* Reset hardware buffer head */ 1438 head = AUX_SDB_INDEX(aux, aux->head); 1439 base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE]; 1440 offset = head % CPUM_SF_SDB_PER_TABLE; 1441 cpuhw->lsctl.tear = base + offset * sizeof(unsigned long); 1442 cpuhw->lsctl.dear = aux->sdb_index[head]; 1443 1444 debug_sprintf_event(sfdbg, 6, "%s: " 1445 "head->alert_mark->empty_mark (num_alert, range)" 1446 "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) " 1447 "tear index %#lx, tear %#lx dear %#lx\n", __func__, 1448 aux->head, aux->alert_mark, aux->empty_mark, 1449 AUX_SDB_NUM_ALERT(aux), range, 1450 head / CPUM_SF_SDB_PER_TABLE, 1451 cpuhw->lsctl.tear, 1452 cpuhw->lsctl.dear); 1453 1454 return 0; 1455 } 1456 1457 /* 1458 * Set alert indicator on SDB at index @alert_index while sampler is running. 1459 * 1460 * Return true if successfully. 1461 * Return false if full indicator is already set by hardware sampler. 1462 */ 1463 static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, 1464 unsigned long long *overflow) 1465 { 1466 unsigned long long orig_overflow, orig_flags, new_flags; 1467 struct hws_trailer_entry *te; 1468 1469 te = aux_sdb_trailer(aux, alert_index); 1470 do { 1471 orig_flags = te->flags; 1472 orig_overflow = te->overflow; 1473 *overflow = orig_overflow; 1474 if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { 1475 /* 1476 * SDB is already set by hardware. 1477 * Abort and try to set somewhere 1478 * behind. 1479 */ 1480 return false; 1481 } 1482 new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK; 1483 } while (!cmpxchg_double(&te->flags, &te->overflow, 1484 orig_flags, orig_overflow, 1485 new_flags, 0ULL)); 1486 return true; 1487 } 1488 1489 /* 1490 * aux_reset_buffer() - Scan and setup SDBs for new samples 1491 * @aux: The AUX buffer to set 1492 * @range: The range of SDBs to scan started from aux->head 1493 * @overflow: Set to overflow count 1494 * 1495 * Set alert indicator on the SDB at index of aux->alert_mark. If this SDB is 1496 * marked as empty, check if it is already set full by the hardware sampler. 1497 * If yes, that means new data is already there before we can set an alert 1498 * indicator. Caller should try to set alert indicator to some position behind. 1499 * 1500 * Scan the SDBs in AUX buffer from behind aux->empty_mark. They are used 1501 * previously and have already been consumed by user space. Reset these SDBs 1502 * (clear full indicator and alert indicator) for new data. 1503 * If aux->alert_mark fall in this area, just set it. Overflow count is 1504 * recorded while scanning. 1505 * 1506 * SDBs between aux->head and aux->empty_mark are already reset at last time. 1507 * and ready for new samples. So scanning on this area could be skipped. 1508 * 1509 * Return true if alert indicator is set successfully and false if not. 
1510 */ 1511 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, 1512 unsigned long long *overflow) 1513 { 1514 unsigned long long orig_overflow, orig_flags, new_flags; 1515 unsigned long i, range_scan, idx; 1516 struct hws_trailer_entry *te; 1517 1518 if (range <= AUX_SDB_NUM_EMPTY(aux)) 1519 /* 1520 * No need to scan. All SDBs in range are marked as empty. 1521 * Just set alert indicator. Should check race with hardware 1522 * sampler. 1523 */ 1524 return aux_set_alert(aux, aux->alert_mark, overflow); 1525 1526 if (aux->alert_mark <= aux->empty_mark) 1527 /* 1528 * Set alert indicator on empty SDB. Should check race 1529 * with hardware sampler. 1530 */ 1531 if (!aux_set_alert(aux, aux->alert_mark, overflow)) 1532 return false; 1533 1534 /* 1535 * Scan the SDBs to clear full and alert indicator used previously. 1536 * Start scanning from one SDB behind empty_mark. If the new alert 1537 * indicator fall into this range, set it. 1538 */ 1539 range_scan = range - AUX_SDB_NUM_EMPTY(aux); 1540 idx = aux->empty_mark + 1; 1541 for (i = 0; i < range_scan; i++, idx++) { 1542 te = aux_sdb_trailer(aux, idx); 1543 do { 1544 orig_flags = te->flags; 1545 orig_overflow = te->overflow; 1546 new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK; 1547 if (idx == aux->alert_mark) 1548 new_flags |= SDB_TE_ALERT_REQ_MASK; 1549 else 1550 new_flags &= ~SDB_TE_ALERT_REQ_MASK; 1551 } while (!cmpxchg_double(&te->flags, &te->overflow, 1552 orig_flags, orig_overflow, 1553 new_flags, 0ULL)); 1554 *overflow += orig_overflow; 1555 } 1556 1557 /* Update empty_mark to new position */ 1558 aux->empty_mark = aux->head + range - 1; 1559 1560 return true; 1561 } 1562 1563 /* 1564 * Measurement alert handler for diagnostic mode sampling. 1565 */ 1566 static void hw_collect_aux(struct cpu_hw_sf *cpuhw) 1567 { 1568 struct aux_buffer *aux; 1569 int done = 0; 1570 unsigned long range = 0, size; 1571 unsigned long long overflow = 0; 1572 struct perf_output_handle *handle = &cpuhw->handle; 1573 unsigned long num_sdb; 1574 1575 aux = perf_get_aux(handle); 1576 if (WARN_ON_ONCE(!aux)) 1577 return; 1578 1579 /* Inform user space new data arrived */ 1580 size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; 1581 perf_aux_output_end(handle, size); 1582 num_sdb = aux->sfb.num_sdb; 1583 1584 while (!done) { 1585 /* Get an output handle */ 1586 aux = perf_aux_output_begin(handle, cpuhw->event); 1587 if (handle->size == 0) { 1588 pr_err("The AUX buffer with %lu pages for the " 1589 "diagnostic-sampling mode is full\n", 1590 num_sdb); 1591 debug_sprintf_event(sfdbg, 1, 1592 "%s: AUX buffer used up\n", 1593 __func__); 1594 break; 1595 } 1596 if (WARN_ON_ONCE(!aux)) 1597 return; 1598 1599 /* Update head and alert_mark to new position */ 1600 aux->head = handle->head >> PAGE_SHIFT; 1601 range = (handle->size + 1) >> PAGE_SHIFT; 1602 if (range == 1) 1603 aux->alert_mark = aux->head; 1604 else 1605 aux->alert_mark = aux->head + range/2 - 1; 1606 1607 if (aux_reset_buffer(aux, range, &overflow)) { 1608 if (!overflow) { 1609 done = 1; 1610 break; 1611 } 1612 size = range << PAGE_SHIFT; 1613 perf_aux_output_end(&cpuhw->handle, size); 1614 pr_err("Sample data caused the AUX buffer with %lu " 1615 "pages to overflow\n", num_sdb); 1616 debug_sprintf_event(sfdbg, 1, "%s: head %#lx range %#lx " 1617 "overflow %#llx\n", __func__, 1618 aux->head, range, overflow); 1619 } else { 1620 size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; 1621 perf_aux_output_end(&cpuhw->handle, size); 1622 debug_sprintf_event(sfdbg, 6, "%s: head %#lx alert %#lx " 1623 
"already full, try another\n", 1624 __func__, 1625 aux->head, aux->alert_mark); 1626 } 1627 } 1628 1629 if (done) 1630 debug_sprintf_event(sfdbg, 6, "%s: aux_reset_buffer " 1631 "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n", 1632 __func__, aux->head, aux->alert_mark, 1633 aux->empty_mark, AUX_SDB_NUM_ALERT(aux), 1634 range); 1635 } 1636 1637 /* 1638 * Callback when freeing AUX buffers. 1639 */ 1640 static void aux_buffer_free(void *data) 1641 { 1642 struct aux_buffer *aux = data; 1643 unsigned long i, num_sdbt; 1644 1645 if (!aux) 1646 return; 1647 1648 /* Free SDBT. SDB is freed by the caller */ 1649 num_sdbt = aux->sfb.num_sdbt; 1650 for (i = 0; i < num_sdbt; i++) 1651 free_page(aux->sdbt_index[i]); 1652 1653 kfree(aux->sdbt_index); 1654 kfree(aux->sdb_index); 1655 kfree(aux); 1656 1657 debug_sprintf_event(sfdbg, 4, "%s: free " 1658 "%lu SDBTs\n", __func__, num_sdbt); 1659 } 1660 1661 static void aux_sdb_init(unsigned long sdb) 1662 { 1663 struct hws_trailer_entry *te; 1664 1665 te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb); 1666 1667 /* Save clock base */ 1668 te->clock_base = 1; 1669 memcpy(&te->progusage2, &tod_clock_base[1], 8); 1670 } 1671 1672 /* 1673 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling 1674 * @event: Event the buffer is setup for, event->cpu == -1 means current 1675 * @pages: Array of pointers to buffer pages passed from perf core 1676 * @nr_pages: Total pages 1677 * @snapshot: Flag for snapshot mode 1678 * 1679 * This is the callback when setup an event using AUX buffer. Perf tool can 1680 * trigger this by an additional mmap() call on the event. Unlike the buffer 1681 * for basic samples, AUX buffer belongs to the event. It is scheduled with 1682 * the task among online cpus when it is a per-thread event. 1683 * 1684 * Return the private AUX buffer structure if success or NULL if fails. 1685 */ 1686 static void *aux_buffer_setup(struct perf_event *event, void **pages, 1687 int nr_pages, bool snapshot) 1688 { 1689 struct sf_buffer *sfb; 1690 struct aux_buffer *aux; 1691 unsigned long *new, *tail; 1692 int i, n_sdbt; 1693 1694 if (!nr_pages || !pages) 1695 return NULL; 1696 1697 if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) { 1698 pr_err("AUX buffer size (%i pages) is larger than the " 1699 "maximum sampling buffer limit\n", 1700 nr_pages); 1701 return NULL; 1702 } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) { 1703 pr_err("AUX buffer size (%i pages) is less than the " 1704 "minimum sampling buffer limit\n", 1705 nr_pages); 1706 return NULL; 1707 } 1708 1709 /* Allocate aux_buffer struct for the event */ 1710 aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL); 1711 if (!aux) 1712 goto no_aux; 1713 sfb = &aux->sfb; 1714 1715 /* Allocate sdbt_index for fast reference */ 1716 n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE; 1717 aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL); 1718 if (!aux->sdbt_index) 1719 goto no_sdbt_index; 1720 1721 /* Allocate sdb_index for fast reference */ 1722 aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL); 1723 if (!aux->sdb_index) 1724 goto no_sdb_index; 1725 1726 /* Allocate the first SDBT */ 1727 sfb->num_sdbt = 0; 1728 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1729 if (!sfb->sdbt) 1730 goto no_sdbt; 1731 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt; 1732 tail = sfb->tail = sfb->sdbt; 1733 1734 /* 1735 * Link the provided pages of AUX buffer to SDBT. 1736 * Allocate SDBT if needed. 
1737 */ 1738 for (i = 0; i < nr_pages; i++, tail++) { 1739 if (require_table_link(tail)) { 1740 new = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1741 if (!new) 1742 goto no_sdbt; 1743 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new; 1744 /* Link current page to tail of chain */ 1745 *tail = (unsigned long)(void *) new + 1; 1746 tail = new; 1747 } 1748 /* Tail is the entry in a SDBT */ 1749 *tail = (unsigned long)pages[i]; 1750 aux->sdb_index[i] = (unsigned long)pages[i]; 1751 aux_sdb_init((unsigned long)pages[i]); 1752 } 1753 sfb->num_sdb = nr_pages; 1754 1755 /* Link the last entry in the SDBT to the first SDBT */ 1756 *tail = (unsigned long) sfb->sdbt + 1; 1757 sfb->tail = tail; 1758 1759 /* 1760 * Initial all SDBs are zeroed. Mark it as empty. 1761 * So there is no need to clear the full indicator 1762 * when this event is first added. 1763 */ 1764 aux->empty_mark = sfb->num_sdb - 1; 1765 1766 debug_sprintf_event(sfdbg, 4, "%s: setup %lu SDBTs and %lu SDBs\n", 1767 __func__, sfb->num_sdbt, sfb->num_sdb); 1768 1769 return aux; 1770 1771 no_sdbt: 1772 /* SDBs (AUX buffer pages) are freed by caller */ 1773 for (i = 0; i < sfb->num_sdbt; i++) 1774 free_page(aux->sdbt_index[i]); 1775 kfree(aux->sdb_index); 1776 no_sdb_index: 1777 kfree(aux->sdbt_index); 1778 no_sdbt_index: 1779 kfree(aux); 1780 no_aux: 1781 return NULL; 1782 } 1783 1784 static void cpumsf_pmu_read(struct perf_event *event) 1785 { 1786 /* Nothing to do ... updates are interrupt-driven */ 1787 } 1788 1789 /* Check if the new sampling period/freqeuncy is appropriate. 1790 * 1791 * Return non-zero on error and zero on passed checks. 1792 */ 1793 static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) 1794 { 1795 struct hws_qsi_info_block si; 1796 unsigned long rate; 1797 bool do_freq; 1798 1799 memset(&si, 0, sizeof(si)); 1800 if (event->cpu == -1) { 1801 if (qsi(&si)) 1802 return -ENODEV; 1803 } else { 1804 /* Event is pinned to a particular CPU, retrieve the per-CPU 1805 * sampling structure for accessing the CPU-specific QSI. 1806 */ 1807 struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 1808 1809 si = cpuhw->qsi; 1810 } 1811 1812 do_freq = !!SAMPLE_FREQ_MODE(&event->hw); 1813 rate = getrate(do_freq, value, &si); 1814 if (!rate) 1815 return -EINVAL; 1816 1817 event->attr.sample_period = rate; 1818 SAMPL_RATE(&event->hw) = rate; 1819 hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); 1820 debug_sprintf_event(sfdbg, 4, "%s:" 1821 " cpu %d value %#llx period %#llx freq %d\n", 1822 __func__, event->cpu, value, 1823 event->attr.sample_period, do_freq); 1824 return 0; 1825 } 1826 1827 /* Activate sampling control. 1828 * Next call of pmu_enable() starts sampling. 1829 */ 1830 static void cpumsf_pmu_start(struct perf_event *event, int flags) 1831 { 1832 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1833 1834 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) 1835 return; 1836 1837 if (flags & PERF_EF_RELOAD) 1838 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); 1839 1840 perf_pmu_disable(event->pmu); 1841 event->hw.state = 0; 1842 cpuhw->lsctl.cs = 1; 1843 if (SAMPL_DIAG_MODE(&event->hw)) 1844 cpuhw->lsctl.cd = 1; 1845 perf_pmu_enable(event->pmu); 1846 } 1847 1848 /* Deactivate sampling control. 1849 * Next call of pmu_enable() stops sampling. 
/* Deactivate sampling control.
 * Next call of pmu_enable() stops sampling.
 */
static void cpumsf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	perf_pmu_disable(event->pmu);
	cpuhw->lsctl.cs = 0;
	cpuhw->lsctl.cd = 0;
	event->hw.state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event, 1);
		event->hw.state |= PERF_HES_UPTODATE;
	}
	perf_pmu_enable(event->pmu);
}

static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
	struct aux_buffer *aux;
	int err;

	if (cpuhw->flags & PMU_F_IN_USE)
		return -EAGAIN;

	if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
		return -EINVAL;

	err = 0;
	perf_pmu_disable(event->pmu);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* Set up sampling controls. Always program the sampling register
	 * using the SDB-table start. Reset TEAR_REG event hardware register
	 * that is used by hw_perf_event_update() to store the sampling buffer
	 * position after samples have been flushed.
	 */
	cpuhw->lsctl.s = 0;
	cpuhw->lsctl.h = 1;
	cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
	if (!SAMPL_DIAG_MODE(&event->hw)) {
		cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
		cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
		TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt;
	}

	/* Ensure sampling functions are in the disabled state. If disabled,
	 * switch on sampling enable control. */
	if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
		err = -EAGAIN;
		goto out;
	}
	if (SAMPL_DIAG_MODE(&event->hw)) {
		aux = perf_aux_output_begin(&cpuhw->handle, event);
		if (!aux) {
			err = -EINVAL;
			goto out;
		}
		err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
		if (err)
			goto out;
		cpuhw->lsctl.ed = 1;
	}
	cpuhw->lsctl.es = 1;

	/* Set in_use flag and store event */
	cpuhw->event = event;
	cpuhw->flags |= PMU_F_IN_USE;

	if (flags & PERF_EF_START)
		cpumsf_pmu_start(event, PERF_EF_RELOAD);
out:
	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	return err;
}

static void cpumsf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);

	perf_pmu_disable(event->pmu);
	cpumsf_pmu_stop(event, PERF_EF_UPDATE);

	cpuhw->lsctl.es = 0;
	cpuhw->lsctl.ed = 0;
	cpuhw->flags &= ~PMU_F_IN_USE;
	cpuhw->event = NULL;

	if (SAMPL_DIAG_MODE(&event->hw))
		aux_output_end(&cpuhw->handle);
	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
}
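/*
 * Illustrative callback sequence for a single event (the exact order is
 * driven by the perf core; this is just the typical flow):
 *
 *	cpumsf_pmu_add(event, PERF_EF_START)	   es/ed set, buffer attached
 *	  cpumsf_pmu_start(event, PERF_EF_RELOAD)  cs/cd set, sampling runs
 *	    ... measurement-alert interrupts deliver samples ...
 *	  cpumsf_pmu_stop(event, PERF_EF_UPDATE)   cs/cd cleared, samples flushed
 *	cpumsf_pmu_del(event, 0)		   es/ed cleared, PMU released
 *
 * Only one event can be active per CPU: a second add() while PMU_F_IN_USE
 * is set fails with -EAGAIN.
 */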
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);

/* Attribute list for CPU_SF.
 *
 * The availability depends on the CPU-measurement facility sampling
 * authorization for basic and diagnostic samples. This is determined at
 * initialization time by the sampling facility device driver.
 * If the authorization for basic samples is turned off, it should also
 * be turned off for diagnostic sampling.
 *
 * During initialization of the device driver, the authorization level for
 * diagnostic sampling is checked and the attribute file for diagnostic
 * sampling is installed if necessary.
 *
 * For now, install a placeholder to reference all possible attributes:
 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
 * Add another entry for the final NULL pointer.
 */
enum {
	SF_CYCLES_BASIC_ATTR_IDX = 0,
	SF_CYCLES_BASIC_DIAG_ATTR_IDX,
	SF_CYCLES_ATTR_MAX
};

static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
	[SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *cpumsf_pmu_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group cpumsf_pmu_events_group = {
	.name = "events",
	.attrs = cpumsf_pmu_events_attr,
};

static struct attribute_group cpumsf_pmu_format_group = {
	.name = "format",
	.attrs = cpumsf_pmu_format_attr,
};

static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
	&cpumsf_pmu_events_group,
	&cpumsf_pmu_format_group,
	NULL,
};

static struct pmu cpumf_sampling = {
	.pmu_enable = cpumsf_pmu_enable,
	.pmu_disable = cpumsf_pmu_disable,

	.event_init = cpumsf_pmu_event_init,
	.add = cpumsf_pmu_add,
	.del = cpumsf_pmu_del,

	.start = cpumsf_pmu_start,
	.stop = cpumsf_pmu_stop,
	.read = cpumsf_pmu_read,

	.attr_groups = cpumsf_pmu_attr_groups,

	.setup_aux = aux_buffer_setup,
	.free_aux = aux_buffer_free,

	.check_period = cpumsf_pmu_check_period,
};
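/*
 * With the attribute groups above, a successful perf_pmu_register() call
 * (see init_cpum_sampling_pmu() below) is expected to expose the PMU in
 * sysfs roughly as follows (paths assume the usual event_source layout):
 *
 *	/sys/bus/event_source/devices/cpum_sf/format/event
 *	/sys/bus/event_source/devices/cpum_sf/events/SF_CYCLES_BASIC
 *	/sys/bus/event_source/devices/cpum_sf/events/SF_CYCLES_BASIC_DIAG
 *
 * The SF_CYCLES_BASIC_DIAG entry appears only if diagnostic sampling is
 * authorized.
 */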
static void cpumf_measurement_alert(struct ext_code ext_code,
				    unsigned int alert, unsigned long unused)
{
	struct cpu_hw_sf *cpuhw;

	if (!(alert & CPU_MF_INT_SF_MASK))
		return;
	inc_irq_stat(IRQEXT_CMS);
	cpuhw = this_cpu_ptr(&cpu_hw_sf);

	/* Measurement alerts are shared and might happen when the PMU
	 * is not reserved. Ignore these alerts in this case. */
	if (!(cpuhw->flags & PMU_F_RESERVED))
		return;

	/* The processing below must take care of multiple alert events that
	 * might be indicated concurrently. */

	/* Program alert request */
	if (alert & CPU_MF_INT_SF_PRA) {
		if (cpuhw->flags & PMU_F_IN_USE) {
			if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
				hw_collect_aux(cpuhw);
			else
				hw_perf_event_update(cpuhw->event, 0);
		} else {
			WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
		}
	}

	/* Report measurement alerts only for non-PRA codes */
	if (alert != CPU_MF_INT_SF_PRA)
		debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
				    alert);

	/* Sampling authorization change request */
	if (alert & CPU_MF_INT_SF_SACA)
		qsi(&cpuhw->qsi);

	/* Loss of sample data due to high-priority machine activities */
	if (alert & CPU_MF_INT_SF_LSDA) {
		pr_err("Sample data was lost\n");
		cpuhw->flags |= PMU_F_ERR_LSDA;
		sf_disable();
	}

	/* Invalid sampling buffer entry */
	if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
		pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
		       alert);
		cpuhw->flags |= PMU_F_ERR_IBE;
		sf_disable();
	}
}

static int cpusf_pmu_setup(unsigned int cpu, int flags)
{
	/* Ignore the notification if no events are scheduled on the PMU.
	 * This might be racy...
	 */
	if (!atomic_read(&num_events))
		return 0;

	local_irq_disable();
	setup_pmc_cpu(&flags);
	local_irq_enable();
	return 0;
}

static int s390_pmu_sf_online_cpu(unsigned int cpu)
{
	return cpusf_pmu_setup(cpu, PMC_INIT);
}

static int s390_pmu_sf_offline_cpu(unsigned int cpu)
{
	return cpusf_pmu_setup(cpu, PMC_RELEASE);
}

static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
{
	if (!cpum_sf_avail())
		return -ENODEV;
	return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
}

static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
{
	int rc;
	unsigned long min, max;

	if (!cpum_sf_avail())
		return -ENODEV;
	if (!val || !strlen(val))
		return -EINVAL;

	/* Valid parameter values: "min,max" or "max" */
	min = CPUM_SF_MIN_SDB;
	max = CPUM_SF_MAX_SDB;
	if (strchr(val, ','))
		rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
	else
		rc = kstrtoul(val, 10, &max);

	if (min < 2 || min >= max || max > get_num_physpages())
		rc = -EINVAL;
	if (rc)
		return rc;

	sfb_set_limits(min, max);
	pr_info("The sampling buffer limits have changed to: "
		"min %lu max %lu (diag %lu)\n",
		CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
	return 0;
}
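/*
 * Accepted parameter formats (example values only):
 *
 *	cpum_sfb_size=64,1024	set minimum and maximum number of SDBs
 *	cpum_sfb_size=1024	set the maximum, keep the current minimum
 *
 * A new setting is rejected unless 2 <= min < max and max does not exceed
 * the number of physical pages reported by get_num_physpages().
 */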
#define param_check_sfb_size(name, p) __param_check(name, p, void)
static const struct kernel_param_ops param_ops_sfb_size = {
	.set = param_set_sfb_size,
	.get = param_get_sfb_size,
};

#define RS_INIT_FAILURE_QSI	0x0001
#define RS_INIT_FAILURE_BSDES	0x0002
#define RS_INIT_FAILURE_ALRT	0x0003
#define RS_INIT_FAILURE_PERF	0x0004
static void __init pr_cpumsf_err(unsigned int reason)
{
	pr_err("Sampling facility support for perf is not available: "
	       "reason %#x\n", reason);
}

static int __init init_cpum_sampling_pmu(void)
{
	struct hws_qsi_info_block si;
	int err;

	if (!cpum_sf_avail())
		return -ENODEV;

	memset(&si, 0, sizeof(si));
	if (qsi(&si)) {
		pr_cpumsf_err(RS_INIT_FAILURE_QSI);
		return -ENODEV;
	}

	if (!si.as && !si.ad)
		return -ENODEV;

	if (si.bsdes != sizeof(struct hws_basic_entry)) {
		pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
		return -EINVAL;
	}

	if (si.ad) {
		sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
		/* Sampling of diagnostic data authorized,
		 * install event into attribute list of PMU device.
		 */
		cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
	}

	sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
	if (!sfdbg) {
		pr_err("Registering for s390dbf failed\n");
		return -ENOMEM;
	}
	debug_register_view(sfdbg, &debug_sprintf_view);

	err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
				    cpumf_measurement_alert);
	if (err) {
		pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
		debug_unregister(sfdbg);
		goto out;
	}

	err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
	if (err) {
		pr_cpumsf_err(RS_INIT_FAILURE_PERF);
		unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
					cpumf_measurement_alert);
		debug_unregister(sfdbg);
		goto out;
	}

	cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
			  s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
	return err;
}

arch_initcall(init_cpum_sampling_pmu);
core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
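/*
 * Usage sketch (illustrative; the event names are those exported through
 * the events attribute group above, and the buffer limits can be set on
 * the kernel command line via the core_param() registration):
 *
 *	# boot with custom sampling buffer limits
 *	cpum_sfb_size=64,1024
 *
 *	# basic-sampling mode
 *	$ perf record -e cpum_sf/SF_CYCLES_BASIC/ -- <workload>
 *
 *	# diagnostic-sampling mode (requires authorization)
 *	$ perf record -e cpum_sf/SF_CYCLES_BASIC_DIAG/ -- <workload>
 */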