1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Performance event support for the System z CPU-measurement Sampling Facility 4 * 5 * Copyright IBM Corp. 2013, 2018 6 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 7 */ 8 #define KMSG_COMPONENT "cpum_sf" 9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/kernel_stat.h> 13 #include <linux/perf_event.h> 14 #include <linux/percpu.h> 15 #include <linux/pid.h> 16 #include <linux/notifier.h> 17 #include <linux/export.h> 18 #include <linux/slab.h> 19 #include <linux/mm.h> 20 #include <linux/moduleparam.h> 21 #include <asm/cpu_mf.h> 22 #include <asm/irq.h> 23 #include <asm/debug.h> 24 #include <asm/timex.h> 25 26 /* Minimum number of sample-data-block-tables: 27 * At least one table is required for the sampling buffer structure. 28 * A single table contains up to 511 pointers to sample-data-blocks. 29 */ 30 #define CPUM_SF_MIN_SDBT 1 31 32 /* Number of sample-data-blocks per sample-data-block-table (SDBT): 33 * A table contains SDB pointers (8 bytes) and one table-link entry 34 * that points to the origin of the next SDBT. 35 */ 36 #define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8) 37 38 /* Maximum page offset for an SDBT table-link entry: 39 * If this page offset is reached, a table-link entry to the next SDBT 40 * must be added. 41 */ 42 #define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8) 43 static inline int require_table_link(const void *sdbt) 44 { 45 return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET; 46 } 47 48 /* Minimum and maximum sampling buffer sizes: 49 * 50 * This number represents the maximum size of the sampling buffer taking 51 * the number of sample-data-block-tables into account. Note that these 52 * numbers apply to the basic-sampling function only. 53 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if 54 * the diagnostic-sampling function is active. 55 * 56 * Sampling buffer size Buffer characteristics 57 * --------------------------------------------------- 58 * 64KB == 16 pages (4KB per page) 59 * 1 page for SDB-tables 60 * 15 pages for SDBs 61 * 62 * 32MB == 8192 pages (4KB per page) 63 * 16 pages for SDB-tables 64 * 8176 pages for SDBs 65 */ 66 static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15; 67 static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176; 68 static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1; 69 70 struct sf_buffer { 71 unsigned long *sdbt; /* Sample-data-block-table origin */ 72 /* buffer characteristics (required for buffer increments) */ 73 unsigned long num_sdb; /* Number of sample-data-blocks */ 74 unsigned long num_sdbt; /* Number of sample-data-block-tables */ 75 unsigned long *tail; /* last sample-data-block-table */ 76 }; 77 78 struct aux_buffer { 79 struct sf_buffer sfb; 80 unsigned long head; /* index of SDB of buffer head */ 81 unsigned long alert_mark; /* index of SDB of alert request position */ 82 unsigned long empty_mark; /* mark of SDB not marked full */ 83 unsigned long *sdb_index; /* SDB address for fast lookup */ 84 unsigned long *sdbt_index; /* SDBT address for fast lookup */ 85 }; 86 87 struct cpu_hw_sf { 88 /* CPU-measurement sampling information block */ 89 struct hws_qsi_info_block qsi; 90 /* CPU-measurement sampling control block */ 91 struct hws_lsctl_request_block lsctl; 92 struct sf_buffer sfb; /* Sampling buffer */ 93 unsigned int flags; /* Status flags */ 94 struct perf_event *event; /* Scheduled perf event */ 95 struct perf_output_handle handle; /* AUX buffer output handle */ 96 }; 97 static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf); 98 99 /* Debug feature */ 100 static debug_info_t *sfdbg; 101 102 /* 103 * sf_disable() - Switch off sampling facility 104 */ 105 static int sf_disable(void) 106 { 107 struct hws_lsctl_request_block sreq; 108 109 memset(&sreq, 0, sizeof(sreq)); 110 return lsctl(&sreq); 111 } 112 113 /* 114 * sf_buffer_available() - Check for an allocated sampling buffer 115 */ 116 static int sf_buffer_available(struct cpu_hw_sf *cpuhw) 117 { 118 return !!cpuhw->sfb.sdbt; 119 } 120 121 /* 122 * deallocate sampling facility buffer 123 */ 124 static void free_sampling_buffer(struct sf_buffer *sfb) 125 { 126 unsigned long *sdbt, *curr; 127 128 if (!sfb->sdbt) 129 return; 130 131 sdbt = sfb->sdbt; 132 curr = sdbt; 133 134 /* Free the SDBT after all SDBs are processed... */ 135 while (1) { 136 if (!*curr || !sdbt) 137 break; 138 139 /* Process table-link entries */ 140 if (is_link_entry(curr)) { 141 curr = get_next_sdbt(curr); 142 if (sdbt) 143 free_page((unsigned long) sdbt); 144 145 /* If the origin is reached, sampling buffer is freed */ 146 if (curr == sfb->sdbt) 147 break; 148 else 149 sdbt = curr; 150 } else { 151 /* Process SDB pointer */ 152 if (*curr) { 153 free_page(*curr); 154 curr++; 155 } 156 } 157 } 158 159 debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, 160 (unsigned long)sfb->sdbt); 161 memset(sfb, 0, sizeof(*sfb)); 162 } 163 164 static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags) 165 { 166 unsigned long sdb, *trailer; 167 168 /* Allocate and initialize sample-data-block */ 169 sdb = get_zeroed_page(gfp_flags); 170 if (!sdb) 171 return -ENOMEM; 172 trailer = trailer_entry_ptr(sdb); 173 *trailer = SDB_TE_ALERT_REQ_MASK; 174 175 /* Link SDB into the sample-data-block-table */ 176 *sdbt = sdb; 177 178 return 0; 179 } 180 181 /* 182 * realloc_sampling_buffer() - extend sampler memory 183 * 184 * Allocates new sample-data-blocks and adds them to the specified sampling 185 * buffer memory. 186 * 187 * Important: This modifies the sampling buffer and must be called when the 188 * sampling facility is disabled. 189 * 190 * Returns zero on success, non-zero otherwise. 191 */ 192 static int realloc_sampling_buffer(struct sf_buffer *sfb, 193 unsigned long num_sdb, gfp_t gfp_flags) 194 { 195 int i, rc; 196 unsigned long *new, *tail, *tail_prev = NULL; 197 198 if (!sfb->sdbt || !sfb->tail) 199 return -EINVAL; 200 201 if (!is_link_entry(sfb->tail)) 202 return -EINVAL; 203 204 /* Append to the existing sampling buffer, overwriting the table-link 205 * register. 206 * The tail variables always points to the "tail" (last and table-link) 207 * entry in an SDB-table. 208 */ 209 tail = sfb->tail; 210 211 /* Do a sanity check whether the table-link entry points to 212 * the sampling buffer origin. 213 */ 214 if (sfb->sdbt != get_next_sdbt(tail)) { 215 debug_sprintf_event(sfdbg, 3, "%s: " 216 "sampling buffer is not linked: origin %#lx" 217 " tail %#lx\n", __func__, 218 (unsigned long)sfb->sdbt, 219 (unsigned long)tail); 220 return -EINVAL; 221 } 222 223 /* Allocate remaining SDBs */ 224 rc = 0; 225 for (i = 0; i < num_sdb; i++) { 226 /* Allocate a new SDB-table if it is full. */ 227 if (require_table_link(tail)) { 228 new = (unsigned long *) get_zeroed_page(gfp_flags); 229 if (!new) { 230 rc = -ENOMEM; 231 break; 232 } 233 sfb->num_sdbt++; 234 /* Link current page to tail of chain */ 235 *tail = (unsigned long)(void *) new + 1; 236 tail_prev = tail; 237 tail = new; 238 } 239 240 /* Allocate a new sample-data-block. 241 * If there is not enough memory, stop the realloc process 242 * and simply use what was allocated. If this is a temporary 243 * issue, a new realloc call (if required) might succeed. 244 */ 245 rc = alloc_sample_data_block(tail, gfp_flags); 246 if (rc) { 247 /* Undo last SDBT. An SDBT with no SDB at its first 248 * entry but with an SDBT entry instead can not be 249 * handled by the interrupt handler code. 250 * Avoid this situation. 251 */ 252 if (tail_prev) { 253 sfb->num_sdbt--; 254 free_page((unsigned long) new); 255 tail = tail_prev; 256 } 257 break; 258 } 259 sfb->num_sdb++; 260 tail++; 261 tail_prev = new = NULL; /* Allocated at least one SBD */ 262 } 263 264 /* Link sampling buffer to its origin */ 265 *tail = (unsigned long) sfb->sdbt + 1; 266 sfb->tail = tail; 267 268 debug_sprintf_event(sfdbg, 4, "%s: new buffer" 269 " settings: sdbt %lu sdb %lu\n", __func__, 270 sfb->num_sdbt, sfb->num_sdb); 271 return rc; 272 } 273 274 /* 275 * allocate_sampling_buffer() - allocate sampler memory 276 * 277 * Allocates and initializes a sampling buffer structure using the 278 * specified number of sample-data-blocks (SDB). For each allocation, 279 * a 4K page is used. The number of sample-data-block-tables (SDBT) 280 * are calculated from SDBs. 281 * Also set the ALERT_REQ mask in each SDBs trailer. 282 * 283 * Returns zero on success, non-zero otherwise. 284 */ 285 static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb) 286 { 287 int rc; 288 289 if (sfb->sdbt) 290 return -EINVAL; 291 292 /* Allocate the sample-data-block-table origin */ 293 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 294 if (!sfb->sdbt) 295 return -ENOMEM; 296 sfb->num_sdb = 0; 297 sfb->num_sdbt = 1; 298 299 /* Link the table origin to point to itself to prepare for 300 * realloc_sampling_buffer() invocation. 301 */ 302 sfb->tail = sfb->sdbt; 303 *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; 304 305 /* Allocate requested number of sample-data-blocks */ 306 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); 307 if (rc) { 308 free_sampling_buffer(sfb); 309 debug_sprintf_event(sfdbg, 4, "%s: " 310 "realloc_sampling_buffer failed with rc %i\n", 311 __func__, rc); 312 } else 313 debug_sprintf_event(sfdbg, 4, 314 "%s: tear %#lx dear %#lx\n", __func__, 315 (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); 316 return rc; 317 } 318 319 static void sfb_set_limits(unsigned long min, unsigned long max) 320 { 321 struct hws_qsi_info_block si; 322 323 CPUM_SF_MIN_SDB = min; 324 CPUM_SF_MAX_SDB = max; 325 326 memset(&si, 0, sizeof(si)); 327 if (!qsi(&si)) 328 CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes); 329 } 330 331 static unsigned long sfb_max_limit(struct hw_perf_event *hwc) 332 { 333 return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR 334 : CPUM_SF_MAX_SDB; 335 } 336 337 static unsigned long sfb_pending_allocs(struct sf_buffer *sfb, 338 struct hw_perf_event *hwc) 339 { 340 if (!sfb->sdbt) 341 return SFB_ALLOC_REG(hwc); 342 if (SFB_ALLOC_REG(hwc) > sfb->num_sdb) 343 return SFB_ALLOC_REG(hwc) - sfb->num_sdb; 344 return 0; 345 } 346 347 static int sfb_has_pending_allocs(struct sf_buffer *sfb, 348 struct hw_perf_event *hwc) 349 { 350 return sfb_pending_allocs(sfb, hwc) > 0; 351 } 352 353 static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc) 354 { 355 /* Limit the number of SDBs to not exceed the maximum */ 356 num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc)); 357 if (num) 358 SFB_ALLOC_REG(hwc) += num; 359 } 360 361 static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc) 362 { 363 SFB_ALLOC_REG(hwc) = 0; 364 sfb_account_allocs(num, hwc); 365 } 366 367 static void deallocate_buffers(struct cpu_hw_sf *cpuhw) 368 { 369 if (cpuhw->sfb.sdbt) 370 free_sampling_buffer(&cpuhw->sfb); 371 } 372 373 static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc) 374 { 375 unsigned long n_sdb, freq, factor; 376 size_t sample_size; 377 378 /* Calculate sampling buffers using 4K pages 379 * 380 * 1. Determine the sample data size which depends on the used 381 * sampling functions, for example, basic-sampling or 382 * basic-sampling with diagnostic-sampling. 383 * 384 * 2. Use the sampling frequency as input. The sampling buffer is 385 * designed for almost one second. This can be adjusted through 386 * the "factor" variable. 387 * In any case, alloc_sampling_buffer() sets the Alert Request 388 * Control indicator to trigger a measurement-alert to harvest 389 * sample-data-blocks (sdb). 390 * 391 * 3. Compute the number of sample-data-blocks and ensure a minimum 392 * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not 393 * exceed a "calculated" maximum. The symbolic maximum is 394 * designed for basic-sampling only and needs to be increased if 395 * diagnostic-sampling is active. 396 * See also the remarks for these symbolic constants. 397 * 398 * 4. Compute the number of sample-data-block-tables (SDBT) and 399 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up 400 * to 511 SDBs). 401 */ 402 sample_size = sizeof(struct hws_basic_entry); 403 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); 404 factor = 1; 405 n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); 406 if (n_sdb < CPUM_SF_MIN_SDB) 407 n_sdb = CPUM_SF_MIN_SDB; 408 409 /* If there is already a sampling buffer allocated, it is very likely 410 * that the sampling facility is enabled too. If the event to be 411 * initialized requires a greater sampling buffer, the allocation must 412 * be postponed. Changing the sampling buffer requires the sampling 413 * facility to be in the disabled state. So, account the number of 414 * required SDBs and let cpumsf_pmu_enable() resize the buffer just 415 * before the event is started. 416 */ 417 sfb_init_allocs(n_sdb, hwc); 418 if (sf_buffer_available(cpuhw)) 419 return 0; 420 421 debug_sprintf_event(sfdbg, 3, 422 "%s: rate %lu f %lu sdb %lu/%lu" 423 " sample_size %lu cpuhw %p\n", __func__, 424 SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), 425 sample_size, cpuhw); 426 427 return alloc_sampling_buffer(&cpuhw->sfb, 428 sfb_pending_allocs(&cpuhw->sfb, hwc)); 429 } 430 431 static unsigned long min_percent(unsigned int percent, unsigned long base, 432 unsigned long min) 433 { 434 return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); 435 } 436 437 static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) 438 { 439 /* Use a percentage-based approach to extend the sampling facility 440 * buffer. Accept up to 5% sample data loss. 441 * Vary the extents between 1% to 5% of the current number of 442 * sample-data-blocks. 443 */ 444 if (ratio <= 5) 445 return 0; 446 if (ratio <= 25) 447 return min_percent(1, base, 1); 448 if (ratio <= 50) 449 return min_percent(1, base, 1); 450 if (ratio <= 75) 451 return min_percent(2, base, 2); 452 if (ratio <= 100) 453 return min_percent(3, base, 3); 454 if (ratio <= 250) 455 return min_percent(4, base, 4); 456 457 return min_percent(5, base, 8); 458 } 459 460 static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, 461 struct hw_perf_event *hwc) 462 { 463 unsigned long ratio, num; 464 465 if (!OVERFLOW_REG(hwc)) 466 return; 467 468 /* The sample_overflow contains the average number of sample data 469 * that has been lost because sample-data-blocks were full. 470 * 471 * Calculate the total number of sample data entries that has been 472 * discarded. Then calculate the ratio of lost samples to total samples 473 * per second in percent. 474 */ 475 ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb, 476 sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc))); 477 478 /* Compute number of sample-data-blocks */ 479 num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb); 480 if (num) 481 sfb_account_allocs(num, hwc); 482 483 debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", 484 __func__, OVERFLOW_REG(hwc), ratio, num); 485 OVERFLOW_REG(hwc) = 0; 486 } 487 488 /* extend_sampling_buffer() - Extend sampling buffer 489 * @sfb: Sampling buffer structure (for local CPU) 490 * @hwc: Perf event hardware structure 491 * 492 * Use this function to extend the sampling buffer based on the overflow counter 493 * and postponed allocation extents stored in the specified Perf event hardware. 494 * 495 * Important: This function disables the sampling facility in order to safely 496 * change the sampling buffer structure. Do not call this function 497 * when the PMU is active. 498 */ 499 static void extend_sampling_buffer(struct sf_buffer *sfb, 500 struct hw_perf_event *hwc) 501 { 502 unsigned long num, num_old; 503 int rc; 504 505 num = sfb_pending_allocs(sfb, hwc); 506 if (!num) 507 return; 508 num_old = sfb->num_sdb; 509 510 /* Disable the sampling facility to reset any states and also 511 * clear pending measurement alerts. 512 */ 513 sf_disable(); 514 515 /* Extend the sampling buffer. 516 * This memory allocation typically happens in an atomic context when 517 * called by perf. Because this is a reallocation, it is fine if the 518 * new SDB-request cannot be satisfied immediately. 519 */ 520 rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); 521 if (rc) 522 debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n", 523 __func__, rc); 524 525 if (sfb_has_pending_allocs(sfb, hwc)) 526 debug_sprintf_event(sfdbg, 5, "%s: " 527 "req %lu alloc %lu remaining %lu\n", 528 __func__, num, sfb->num_sdb - num_old, 529 sfb_pending_allocs(sfb, hwc)); 530 } 531 532 /* Number of perf events counting hardware events */ 533 static atomic_t num_events; 534 /* Used to avoid races in calling reserve/release_cpumf_hardware */ 535 static DEFINE_MUTEX(pmc_reserve_mutex); 536 537 #define PMC_INIT 0 538 #define PMC_RELEASE 1 539 #define PMC_FAILURE 2 540 static void setup_pmc_cpu(void *flags) 541 { 542 int err; 543 struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf); 544 545 err = 0; 546 switch (*((int *) flags)) { 547 case PMC_INIT: 548 memset(cpusf, 0, sizeof(*cpusf)); 549 err = qsi(&cpusf->qsi); 550 if (err) 551 break; 552 cpusf->flags |= PMU_F_RESERVED; 553 err = sf_disable(); 554 if (err) 555 pr_err("Switching off the sampling facility failed " 556 "with rc %i\n", err); 557 debug_sprintf_event(sfdbg, 5, 558 "%s: initialized: cpuhw %p\n", __func__, 559 cpusf); 560 break; 561 case PMC_RELEASE: 562 cpusf->flags &= ~PMU_F_RESERVED; 563 err = sf_disable(); 564 if (err) { 565 pr_err("Switching off the sampling facility failed " 566 "with rc %i\n", err); 567 } else 568 deallocate_buffers(cpusf); 569 debug_sprintf_event(sfdbg, 5, 570 "%s: released: cpuhw %p\n", __func__, 571 cpusf); 572 break; 573 } 574 if (err) 575 *((int *) flags) |= PMC_FAILURE; 576 } 577 578 static void release_pmc_hardware(void) 579 { 580 int flags = PMC_RELEASE; 581 582 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); 583 on_each_cpu(setup_pmc_cpu, &flags, 1); 584 } 585 586 static int reserve_pmc_hardware(void) 587 { 588 int flags = PMC_INIT; 589 590 on_each_cpu(setup_pmc_cpu, &flags, 1); 591 if (flags & PMC_FAILURE) { 592 release_pmc_hardware(); 593 return -ENODEV; 594 } 595 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); 596 597 return 0; 598 } 599 600 static void hw_perf_event_destroy(struct perf_event *event) 601 { 602 /* Release PMC if this is the last perf event */ 603 if (!atomic_add_unless(&num_events, -1, 1)) { 604 mutex_lock(&pmc_reserve_mutex); 605 if (atomic_dec_return(&num_events) == 0) 606 release_pmc_hardware(); 607 mutex_unlock(&pmc_reserve_mutex); 608 } 609 } 610 611 static void hw_init_period(struct hw_perf_event *hwc, u64 period) 612 { 613 hwc->sample_period = period; 614 hwc->last_period = hwc->sample_period; 615 local64_set(&hwc->period_left, hwc->sample_period); 616 } 617 618 static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, 619 unsigned long rate) 620 { 621 return clamp_t(unsigned long, rate, 622 si->min_sampl_rate, si->max_sampl_rate); 623 } 624 625 static u32 cpumsf_pid_type(struct perf_event *event, 626 u32 pid, enum pid_type type) 627 { 628 struct task_struct *tsk; 629 630 /* Idle process */ 631 if (!pid) 632 goto out; 633 634 tsk = find_task_by_pid_ns(pid, &init_pid_ns); 635 pid = -1; 636 if (tsk) { 637 /* 638 * Only top level events contain the pid namespace in which 639 * they are created. 640 */ 641 if (event->parent) 642 event = event->parent; 643 pid = __task_pid_nr_ns(tsk, type, event->ns); 644 /* 645 * See also 1d953111b648 646 * "perf/core: Don't report zero PIDs for exiting tasks". 647 */ 648 if (!pid && !pid_alive(tsk)) 649 pid = -1; 650 } 651 out: 652 return pid; 653 } 654 655 static void cpumsf_output_event_pid(struct perf_event *event, 656 struct perf_sample_data *data, 657 struct pt_regs *regs) 658 { 659 u32 pid; 660 struct perf_event_header header; 661 struct perf_output_handle handle; 662 663 /* 664 * Obtain the PID from the basic-sampling data entry and 665 * correct the data->tid_entry.pid value. 666 */ 667 pid = data->tid_entry.pid; 668 669 /* Protect callchain buffers, tasks */ 670 rcu_read_lock(); 671 672 perf_prepare_sample(&header, data, event, regs); 673 if (perf_output_begin(&handle, event, header.size)) 674 goto out; 675 676 /* Update the process ID (see also kernel/events/core.c) */ 677 data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID); 678 data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID); 679 680 perf_output_sample(&handle, &header, data, event); 681 perf_output_end(&handle); 682 out: 683 rcu_read_unlock(); 684 } 685 686 static unsigned long getrate(bool freq, unsigned long sample, 687 struct hws_qsi_info_block *si) 688 { 689 unsigned long rate; 690 691 if (freq) { 692 rate = freq_to_sample_rate(si, sample); 693 rate = hw_limit_rate(si, rate); 694 } else { 695 /* The min/max sampling rates specifies the valid range 696 * of sample periods. If the specified sample period is 697 * out of range, limit the period to the range boundary. 698 */ 699 rate = hw_limit_rate(si, sample); 700 701 /* The perf core maintains a maximum sample rate that is 702 * configurable through the sysctl interface. Ensure the 703 * sampling rate does not exceed this value. This also helps 704 * to avoid throttling when pushing samples with 705 * perf_event_overflow(). 706 */ 707 if (sample_rate_to_freq(si, rate) > 708 sysctl_perf_event_sample_rate) { 709 debug_sprintf_event(sfdbg, 1, "%s: " 710 "Sampling rate exceeds maximum " 711 "perf sample rate\n", __func__); 712 rate = 0; 713 } 714 } 715 return rate; 716 } 717 718 /* The sampling information (si) contains information about the 719 * min/max sampling intervals and the CPU speed. So calculate the 720 * correct sampling interval and avoid the whole period adjust 721 * feedback loop. 722 * 723 * Since the CPU Measurement sampling facility can not handle frequency 724 * calculate the sampling interval when frequency is specified using 725 * this formula: 726 * interval := cpu_speed * 1000000 / sample_freq 727 * 728 * Returns errno on bad input and zero on success with parameter interval 729 * set to the correct sampling rate. 730 * 731 * Note: This function turns off freq bit to avoid calling function 732 * perf_adjust_period(). This causes frequency adjustment in the common 733 * code part which causes tremendous variations in the counter values. 734 */ 735 static int __hw_perf_event_init_rate(struct perf_event *event, 736 struct hws_qsi_info_block *si) 737 { 738 struct perf_event_attr *attr = &event->attr; 739 struct hw_perf_event *hwc = &event->hw; 740 unsigned long rate; 741 742 if (attr->freq) { 743 if (!attr->sample_freq) 744 return -EINVAL; 745 rate = getrate(attr->freq, attr->sample_freq, si); 746 attr->freq = 0; /* Don't call perf_adjust_period() */ 747 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE; 748 } else { 749 rate = getrate(attr->freq, attr->sample_period, si); 750 if (!rate) 751 return -EINVAL; 752 } 753 attr->sample_period = rate; 754 SAMPL_RATE(hwc) = rate; 755 hw_init_period(hwc, SAMPL_RATE(hwc)); 756 debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", 757 __func__, event->cpu, event->attr.sample_period, 758 event->attr.freq, SAMPLE_FREQ_MODE(hwc)); 759 return 0; 760 } 761 762 static int __hw_perf_event_init(struct perf_event *event) 763 { 764 struct cpu_hw_sf *cpuhw; 765 struct hws_qsi_info_block si; 766 struct perf_event_attr *attr = &event->attr; 767 struct hw_perf_event *hwc = &event->hw; 768 int cpu, err; 769 770 /* Reserve CPU-measurement sampling facility */ 771 err = 0; 772 if (!atomic_inc_not_zero(&num_events)) { 773 mutex_lock(&pmc_reserve_mutex); 774 if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) 775 err = -EBUSY; 776 else 777 atomic_inc(&num_events); 778 mutex_unlock(&pmc_reserve_mutex); 779 } 780 event->destroy = hw_perf_event_destroy; 781 782 if (err) 783 goto out; 784 785 /* Access per-CPU sampling information (query sampling info) */ 786 /* 787 * The event->cpu value can be -1 to count on every CPU, for example, 788 * when attaching to a task. If this is specified, use the query 789 * sampling info from the current CPU, otherwise use event->cpu to 790 * retrieve the per-CPU information. 791 * Later, cpuhw indicates whether to allocate sampling buffers for a 792 * particular CPU (cpuhw!=NULL) or each online CPU (cpuw==NULL). 793 */ 794 memset(&si, 0, sizeof(si)); 795 cpuhw = NULL; 796 if (event->cpu == -1) 797 qsi(&si); 798 else { 799 /* Event is pinned to a particular CPU, retrieve the per-CPU 800 * sampling structure for accessing the CPU-specific QSI. 801 */ 802 cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 803 si = cpuhw->qsi; 804 } 805 806 /* Check sampling facility authorization and, if not authorized, 807 * fall back to other PMUs. It is safe to check any CPU because 808 * the authorization is identical for all configured CPUs. 809 */ 810 if (!si.as) { 811 err = -ENOENT; 812 goto out; 813 } 814 815 if (si.ribm & CPU_MF_SF_RIBM_NOTAV) { 816 pr_warn("CPU Measurement Facility sampling is temporarily not available\n"); 817 err = -EBUSY; 818 goto out; 819 } 820 821 /* Always enable basic sampling */ 822 SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE; 823 824 /* Check if diagnostic sampling is requested. Deny if the required 825 * sampling authorization is missing. 826 */ 827 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) { 828 if (!si.ad) { 829 err = -EPERM; 830 goto out; 831 } 832 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE; 833 } 834 835 /* Check and set other sampling flags */ 836 if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS) 837 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS; 838 839 err = __hw_perf_event_init_rate(event, &si); 840 if (err) 841 goto out; 842 843 /* Initialize sample data overflow accounting */ 844 hwc->extra_reg.reg = REG_OVERFLOW; 845 OVERFLOW_REG(hwc) = 0; 846 847 /* Use AUX buffer. No need to allocate it by ourself */ 848 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) 849 return 0; 850 851 /* Allocate the per-CPU sampling buffer using the CPU information 852 * from the event. If the event is not pinned to a particular 853 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling 854 * buffers for each online CPU. 855 */ 856 if (cpuhw) 857 /* Event is pinned to a particular CPU */ 858 err = allocate_buffers(cpuhw, hwc); 859 else { 860 /* Event is not pinned, allocate sampling buffer on 861 * each online CPU 862 */ 863 for_each_online_cpu(cpu) { 864 cpuhw = &per_cpu(cpu_hw_sf, cpu); 865 err = allocate_buffers(cpuhw, hwc); 866 if (err) 867 break; 868 } 869 } 870 871 /* If PID/TID sampling is active, replace the default overflow 872 * handler to extract and resolve the PIDs from the basic-sampling 873 * data entries. 874 */ 875 if (event->attr.sample_type & PERF_SAMPLE_TID) 876 if (is_default_overflow_handler(event)) 877 event->overflow_handler = cpumsf_output_event_pid; 878 out: 879 return err; 880 } 881 882 static int cpumsf_pmu_event_init(struct perf_event *event) 883 { 884 int err; 885 886 /* No support for taken branch sampling */ 887 if (has_branch_stack(event)) 888 return -EOPNOTSUPP; 889 890 switch (event->attr.type) { 891 case PERF_TYPE_RAW: 892 if ((event->attr.config != PERF_EVENT_CPUM_SF) && 893 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG)) 894 return -ENOENT; 895 break; 896 case PERF_TYPE_HARDWARE: 897 /* Support sampling of CPU cycles in addition to the 898 * counter facility. However, the counter facility 899 * is more precise and, hence, restrict this PMU to 900 * sampling events only. 901 */ 902 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES) 903 return -ENOENT; 904 if (!is_sampling_event(event)) 905 return -ENOENT; 906 break; 907 default: 908 return -ENOENT; 909 } 910 911 /* Check online status of the CPU to which the event is pinned */ 912 if (event->cpu >= 0 && !cpu_online(event->cpu)) 913 return -ENODEV; 914 915 /* Force reset of idle/hv excludes regardless of what the 916 * user requested. 917 */ 918 if (event->attr.exclude_hv) 919 event->attr.exclude_hv = 0; 920 if (event->attr.exclude_idle) 921 event->attr.exclude_idle = 0; 922 923 err = __hw_perf_event_init(event); 924 if (unlikely(err)) 925 if (event->destroy) 926 event->destroy(event); 927 return err; 928 } 929 930 static void cpumsf_pmu_enable(struct pmu *pmu) 931 { 932 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 933 struct hw_perf_event *hwc; 934 int err; 935 936 if (cpuhw->flags & PMU_F_ENABLED) 937 return; 938 939 if (cpuhw->flags & PMU_F_ERR_MASK) 940 return; 941 942 /* Check whether to extent the sampling buffer. 943 * 944 * Two conditions trigger an increase of the sampling buffer for a 945 * perf event: 946 * 1. Postponed buffer allocations from the event initialization. 947 * 2. Sampling overflows that contribute to pending allocations. 948 * 949 * Note that the extend_sampling_buffer() function disables the sampling 950 * facility, but it can be fully re-enabled using sampling controls that 951 * have been saved in cpumsf_pmu_disable(). 952 */ 953 if (cpuhw->event) { 954 hwc = &cpuhw->event->hw; 955 if (!(SAMPL_DIAG_MODE(hwc))) { 956 /* 957 * Account number of overflow-designated 958 * buffer extents 959 */ 960 sfb_account_overflows(cpuhw, hwc); 961 extend_sampling_buffer(&cpuhw->sfb, hwc); 962 } 963 /* Rate may be adjusted with ioctl() */ 964 cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); 965 } 966 967 /* (Re)enable the PMU and sampling facility */ 968 cpuhw->flags |= PMU_F_ENABLED; 969 barrier(); 970 971 err = lsctl(&cpuhw->lsctl); 972 if (err) { 973 cpuhw->flags &= ~PMU_F_ENABLED; 974 pr_err("Loading sampling controls failed: op %i err %i\n", 975 1, err); 976 return; 977 } 978 979 /* Load current program parameter */ 980 lpp(&S390_lowcore.lpp); 981 982 debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " 983 "interval %#lx tear %#lx dear %#lx\n", __func__, 984 cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, 985 cpuhw->lsctl.cd, cpuhw->lsctl.interval, 986 cpuhw->lsctl.tear, cpuhw->lsctl.dear); 987 } 988 989 static void cpumsf_pmu_disable(struct pmu *pmu) 990 { 991 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 992 struct hws_lsctl_request_block inactive; 993 struct hws_qsi_info_block si; 994 int err; 995 996 if (!(cpuhw->flags & PMU_F_ENABLED)) 997 return; 998 999 if (cpuhw->flags & PMU_F_ERR_MASK) 1000 return; 1001 1002 /* Switch off sampling activation control */ 1003 inactive = cpuhw->lsctl; 1004 inactive.cs = 0; 1005 inactive.cd = 0; 1006 1007 err = lsctl(&inactive); 1008 if (err) { 1009 pr_err("Loading sampling controls failed: op %i err %i\n", 1010 2, err); 1011 return; 1012 } 1013 1014 /* Save state of TEAR and DEAR register contents */ 1015 err = qsi(&si); 1016 if (!err) { 1017 /* TEAR/DEAR values are valid only if the sampling facility is 1018 * enabled. Note that cpumsf_pmu_disable() might be called even 1019 * for a disabled sampling facility because cpumsf_pmu_enable() 1020 * controls the enable/disable state. 1021 */ 1022 if (si.es) { 1023 cpuhw->lsctl.tear = si.tear; 1024 cpuhw->lsctl.dear = si.dear; 1025 } 1026 } else 1027 debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", 1028 __func__, err); 1029 1030 cpuhw->flags &= ~PMU_F_ENABLED; 1031 } 1032 1033 /* perf_exclude_event() - Filter event 1034 * @event: The perf event 1035 * @regs: pt_regs structure 1036 * @sde_regs: Sample-data-entry (sde) regs structure 1037 * 1038 * Filter perf events according to their exclude specification. 1039 * 1040 * Return non-zero if the event shall be excluded. 1041 */ 1042 static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs, 1043 struct perf_sf_sde_regs *sde_regs) 1044 { 1045 if (event->attr.exclude_user && user_mode(regs)) 1046 return 1; 1047 if (event->attr.exclude_kernel && !user_mode(regs)) 1048 return 1; 1049 if (event->attr.exclude_guest && sde_regs->in_guest) 1050 return 1; 1051 if (event->attr.exclude_host && !sde_regs->in_guest) 1052 return 1; 1053 return 0; 1054 } 1055 1056 /* perf_push_sample() - Push samples to perf 1057 * @event: The perf event 1058 * @sample: Hardware sample data 1059 * 1060 * Use the hardware sample data to create perf event sample. The sample 1061 * is the pushed to the event subsystem and the function checks for 1062 * possible event overflows. If an event overflow occurs, the PMU is 1063 * stopped. 1064 * 1065 * Return non-zero if an event overflow occurred. 1066 */ 1067 static int perf_push_sample(struct perf_event *event, 1068 struct hws_basic_entry *basic) 1069 { 1070 int overflow; 1071 struct pt_regs regs; 1072 struct perf_sf_sde_regs *sde_regs; 1073 struct perf_sample_data data; 1074 1075 /* Setup perf sample */ 1076 perf_sample_data_init(&data, 0, event->hw.last_period); 1077 1078 /* Setup pt_regs to look like an CPU-measurement external interrupt 1079 * using the Program Request Alert code. The regs.int_parm_long 1080 * field which is unused contains additional sample-data-entry related 1081 * indicators. 1082 */ 1083 memset(®s, 0, sizeof(regs)); 1084 regs.int_code = 0x1407; 1085 regs.int_parm = CPU_MF_INT_SF_PRA; 1086 sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; 1087 1088 psw_bits(regs.psw).ia = basic->ia; 1089 psw_bits(regs.psw).dat = basic->T; 1090 psw_bits(regs.psw).wait = basic->W; 1091 psw_bits(regs.psw).pstate = basic->P; 1092 psw_bits(regs.psw).as = basic->AS; 1093 1094 /* 1095 * Use the hardware provided configuration level to decide if the 1096 * sample belongs to a guest or host. If that is not available, 1097 * fall back to the following heuristics: 1098 * A non-zero guest program parameter always indicates a guest 1099 * sample. Some early samples or samples from guests without 1100 * lpp usage would be misaccounted to the host. We use the asn 1101 * value as an addon heuristic to detect most of these guest samples. 1102 * If the value differs from 0xffff (the host value), we assume to 1103 * be a KVM guest. 1104 */ 1105 switch (basic->CL) { 1106 case 1: /* logical partition */ 1107 sde_regs->in_guest = 0; 1108 break; 1109 case 2: /* virtual machine */ 1110 sde_regs->in_guest = 1; 1111 break; 1112 default: /* old machine, use heuristics */ 1113 if (basic->gpp || basic->prim_asn != 0xffff) 1114 sde_regs->in_guest = 1; 1115 break; 1116 } 1117 1118 /* 1119 * Store the PID value from the sample-data-entry to be 1120 * processed and resolved by cpumsf_output_event_pid(). 1121 */ 1122 data.tid_entry.pid = basic->hpp & LPP_PID_MASK; 1123 1124 overflow = 0; 1125 if (perf_exclude_event(event, ®s, sde_regs)) 1126 goto out; 1127 if (perf_event_overflow(event, &data, ®s)) { 1128 overflow = 1; 1129 event->pmu->stop(event, 0); 1130 } 1131 perf_event_update_userpage(event); 1132 out: 1133 return overflow; 1134 } 1135 1136 static void perf_event_count_update(struct perf_event *event, u64 count) 1137 { 1138 local64_add(count, &event->count); 1139 } 1140 1141 /* hw_collect_samples() - Walk through a sample-data-block and collect samples 1142 * @event: The perf event 1143 * @sdbt: Sample-data-block table 1144 * @overflow: Event overflow counter 1145 * 1146 * Walks through a sample-data-block and collects sampling data entries that are 1147 * then pushed to the perf event subsystem. Depending on the sampling function, 1148 * there can be either basic-sampling or combined-sampling data entries. A 1149 * combined-sampling data entry consists of a basic- and a diagnostic-sampling 1150 * data entry. The sampling function is determined by the flags in the perf 1151 * event hardware structure. The function always works with a combined-sampling 1152 * data entry but ignores the the diagnostic portion if it is not available. 1153 * 1154 * Note that the implementation focuses on basic-sampling data entries and, if 1155 * such an entry is not valid, the entire combined-sampling data entry is 1156 * ignored. 1157 * 1158 * The overflow variables counts the number of samples that has been discarded 1159 * due to a perf event overflow. 1160 */ 1161 static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt, 1162 unsigned long long *overflow) 1163 { 1164 struct hws_trailer_entry *te; 1165 struct hws_basic_entry *sample; 1166 1167 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); 1168 sample = (struct hws_basic_entry *) *sdbt; 1169 while ((unsigned long *) sample < (unsigned long *) te) { 1170 /* Check for an empty sample */ 1171 if (!sample->def) 1172 break; 1173 1174 /* Update perf event period */ 1175 perf_event_count_update(event, SAMPL_RATE(&event->hw)); 1176 1177 /* Check whether sample is valid */ 1178 if (sample->def == 0x0001) { 1179 /* If an event overflow occurred, the PMU is stopped to 1180 * throttle event delivery. Remaining sample data is 1181 * discarded. 1182 */ 1183 if (!*overflow) { 1184 /* Check whether sample is consistent */ 1185 if (sample->I == 0 && sample->W == 0) { 1186 /* Deliver sample data to perf */ 1187 *overflow = perf_push_sample(event, 1188 sample); 1189 } 1190 } else 1191 /* Count discarded samples */ 1192 *overflow += 1; 1193 } else { 1194 debug_sprintf_event(sfdbg, 4, 1195 "%s: Found unknown" 1196 " sampling data entry: te->f %i" 1197 " basic.def %#4x (%p)\n", __func__, 1198 te->f, sample->def, sample); 1199 /* Sample slot is not yet written or other record. 1200 * 1201 * This condition can occur if the buffer was reused 1202 * from a combined basic- and diagnostic-sampling. 1203 * If only basic-sampling is then active, entries are 1204 * written into the larger diagnostic entries. 1205 * This is typically the case for sample-data-blocks 1206 * that are not full. Stop processing if the first 1207 * invalid format was detected. 1208 */ 1209 if (!te->f) 1210 break; 1211 } 1212 1213 /* Reset sample slot and advance to next sample */ 1214 sample->def = 0; 1215 sample++; 1216 } 1217 } 1218 1219 /* hw_perf_event_update() - Process sampling buffer 1220 * @event: The perf event 1221 * @flush_all: Flag to also flush partially filled sample-data-blocks 1222 * 1223 * Processes the sampling buffer and create perf event samples. 1224 * The sampling buffer position are retrieved and saved in the TEAR_REG 1225 * register of the specified perf event. 1226 * 1227 * Only full sample-data-blocks are processed. Specify the flash_all flag 1228 * to also walk through partially filled sample-data-blocks. It is ignored 1229 * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag 1230 * enforces the processing of full sample-data-blocks only (trailer entries 1231 * with the block-full-indicator bit set). 1232 */ 1233 static void hw_perf_event_update(struct perf_event *event, int flush_all) 1234 { 1235 struct hw_perf_event *hwc = &event->hw; 1236 struct hws_trailer_entry *te; 1237 unsigned long *sdbt; 1238 unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags; 1239 int done; 1240 1241 /* 1242 * AUX buffer is used when in diagnostic sampling mode. 1243 * No perf events/samples are created. 1244 */ 1245 if (SAMPL_DIAG_MODE(&event->hw)) 1246 return; 1247 1248 if (flush_all && SDB_FULL_BLOCKS(hwc)) 1249 flush_all = 0; 1250 1251 sdbt = (unsigned long *) TEAR_REG(hwc); 1252 done = event_overflow = sampl_overflow = num_sdb = 0; 1253 while (!done) { 1254 /* Get the trailer entry of the sample-data-block */ 1255 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt); 1256 1257 /* Leave loop if no more work to do (block full indicator) */ 1258 if (!te->f) { 1259 done = 1; 1260 if (!flush_all) 1261 break; 1262 } 1263 1264 /* Check the sample overflow count */ 1265 if (te->overflow) 1266 /* Account sample overflows and, if a particular limit 1267 * is reached, extend the sampling buffer. 1268 * For details, see sfb_account_overflows(). 1269 */ 1270 sampl_overflow += te->overflow; 1271 1272 /* Timestamps are valid for full sample-data-blocks only */ 1273 debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " 1274 "overflow %llu timestamp %#llx\n", 1275 __func__, (unsigned long)sdbt, te->overflow, 1276 (te->f) ? trailer_timestamp(te) : 0ULL); 1277 1278 /* Collect all samples from a single sample-data-block and 1279 * flag if an (perf) event overflow happened. If so, the PMU 1280 * is stopped and remaining samples will be discarded. 1281 */ 1282 hw_collect_samples(event, sdbt, &event_overflow); 1283 num_sdb++; 1284 1285 /* Reset trailer (using compare-double-and-swap) */ 1286 do { 1287 te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; 1288 te_flags |= SDB_TE_ALERT_REQ_MASK; 1289 } while (!cmpxchg_double(&te->flags, &te->overflow, 1290 te->flags, te->overflow, 1291 te_flags, 0ULL)); 1292 1293 /* Advance to next sample-data-block */ 1294 sdbt++; 1295 if (is_link_entry(sdbt)) 1296 sdbt = get_next_sdbt(sdbt); 1297 1298 /* Update event hardware registers */ 1299 TEAR_REG(hwc) = (unsigned long) sdbt; 1300 1301 /* Stop processing sample-data if all samples of the current 1302 * sample-data-block were flushed even if it was not full. 1303 */ 1304 if (flush_all && done) 1305 break; 1306 } 1307 1308 /* Account sample overflows in the event hardware structure */ 1309 if (sampl_overflow) 1310 OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) + 1311 sampl_overflow, 1 + num_sdb); 1312 1313 /* Perf_event_overflow() and perf_event_account_interrupt() limit 1314 * the interrupt rate to an upper limit. Roughly 1000 samples per 1315 * task tick. 1316 * Hitting this limit results in a large number 1317 * of throttled REF_REPORT_THROTTLE entries and the samples 1318 * are dropped. 1319 * Slightly increase the interval to avoid hitting this limit. 1320 */ 1321 if (event_overflow) { 1322 SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10); 1323 debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n", 1324 __func__, 1325 DIV_ROUND_UP(SAMPL_RATE(hwc), 10)); 1326 } 1327 1328 if (sampl_overflow || event_overflow) 1329 debug_sprintf_event(sfdbg, 4, "%s: " 1330 "overflows: sample %llu event %llu" 1331 " total %llu num_sdb %llu\n", 1332 __func__, sampl_overflow, event_overflow, 1333 OVERFLOW_REG(hwc), num_sdb); 1334 } 1335 1336 #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb) 1337 #define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0) 1338 #define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark) 1339 #define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark) 1340 1341 /* 1342 * Get trailer entry by index of SDB. 1343 */ 1344 static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux, 1345 unsigned long index) 1346 { 1347 unsigned long sdb; 1348 1349 index = AUX_SDB_INDEX(aux, index); 1350 sdb = aux->sdb_index[index]; 1351 return (struct hws_trailer_entry *)trailer_entry_ptr(sdb); 1352 } 1353 1354 /* 1355 * Finish sampling on the cpu. Called by cpumsf_pmu_del() with pmu 1356 * disabled. Collect the full SDBs in AUX buffer which have not reached 1357 * the point of alert indicator. And ignore the SDBs which are not 1358 * full. 1359 * 1360 * 1. Scan SDBs to see how much data is there and consume them. 1361 * 2. Remove alert indicator in the buffer. 1362 */ 1363 static void aux_output_end(struct perf_output_handle *handle) 1364 { 1365 unsigned long i, range_scan, idx; 1366 struct aux_buffer *aux; 1367 struct hws_trailer_entry *te; 1368 1369 aux = perf_get_aux(handle); 1370 if (!aux) 1371 return; 1372 1373 range_scan = AUX_SDB_NUM_ALERT(aux); 1374 for (i = 0, idx = aux->head; i < range_scan; i++, idx++) { 1375 te = aux_sdb_trailer(aux, idx); 1376 if (!(te->flags & SDB_TE_BUFFER_FULL_MASK)) 1377 break; 1378 } 1379 /* i is num of SDBs which are full */ 1380 perf_aux_output_end(handle, i << PAGE_SHIFT); 1381 1382 /* Remove alert indicators in the buffer */ 1383 te = aux_sdb_trailer(aux, aux->alert_mark); 1384 te->flags &= ~SDB_TE_ALERT_REQ_MASK; 1385 1386 debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n", 1387 __func__, i, range_scan, aux->head); 1388 } 1389 1390 /* 1391 * Start sampling on the CPU. Called by cpumsf_pmu_add() when an event 1392 * is first added to the CPU or rescheduled again to the CPU. It is called 1393 * with pmu disabled. 1394 * 1395 * 1. Reset the trailer of SDBs to get ready for new data. 1396 * 2. Tell the hardware where to put the data by reset the SDBs buffer 1397 * head(tear/dear). 1398 */ 1399 static int aux_output_begin(struct perf_output_handle *handle, 1400 struct aux_buffer *aux, 1401 struct cpu_hw_sf *cpuhw) 1402 { 1403 unsigned long range; 1404 unsigned long i, range_scan, idx; 1405 unsigned long head, base, offset; 1406 struct hws_trailer_entry *te; 1407 1408 if (WARN_ON_ONCE(handle->head & ~PAGE_MASK)) 1409 return -EINVAL; 1410 1411 aux->head = handle->head >> PAGE_SHIFT; 1412 range = (handle->size + 1) >> PAGE_SHIFT; 1413 if (range <= 1) 1414 return -ENOMEM; 1415 1416 /* 1417 * SDBs between aux->head and aux->empty_mark are already ready 1418 * for new data. range_scan is num of SDBs not within them. 1419 */ 1420 debug_sprintf_event(sfdbg, 6, 1421 "%s: range %ld head %ld alert %ld empty %ld\n", 1422 __func__, range, aux->head, aux->alert_mark, 1423 aux->empty_mark); 1424 if (range > AUX_SDB_NUM_EMPTY(aux)) { 1425 range_scan = range - AUX_SDB_NUM_EMPTY(aux); 1426 idx = aux->empty_mark + 1; 1427 for (i = 0; i < range_scan; i++, idx++) { 1428 te = aux_sdb_trailer(aux, idx); 1429 te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | 1430 SDB_TE_ALERT_REQ_MASK); 1431 te->overflow = 0; 1432 } 1433 /* Save the position of empty SDBs */ 1434 aux->empty_mark = aux->head + range - 1; 1435 } 1436 1437 /* Set alert indicator */ 1438 aux->alert_mark = aux->head + range/2 - 1; 1439 te = aux_sdb_trailer(aux, aux->alert_mark); 1440 te->flags = te->flags | SDB_TE_ALERT_REQ_MASK; 1441 1442 /* Reset hardware buffer head */ 1443 head = AUX_SDB_INDEX(aux, aux->head); 1444 base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE]; 1445 offset = head % CPUM_SF_SDB_PER_TABLE; 1446 cpuhw->lsctl.tear = base + offset * sizeof(unsigned long); 1447 cpuhw->lsctl.dear = aux->sdb_index[head]; 1448 1449 debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld " 1450 "index %ld tear %#lx dear %#lx\n", __func__, 1451 aux->head, aux->alert_mark, aux->empty_mark, 1452 head / CPUM_SF_SDB_PER_TABLE, 1453 cpuhw->lsctl.tear, cpuhw->lsctl.dear); 1454 1455 return 0; 1456 } 1457 1458 /* 1459 * Set alert indicator on SDB at index @alert_index while sampler is running. 1460 * 1461 * Return true if successfully. 1462 * Return false if full indicator is already set by hardware sampler. 1463 */ 1464 static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, 1465 unsigned long long *overflow) 1466 { 1467 unsigned long long orig_overflow, orig_flags, new_flags; 1468 struct hws_trailer_entry *te; 1469 1470 te = aux_sdb_trailer(aux, alert_index); 1471 do { 1472 orig_flags = te->flags; 1473 *overflow = orig_overflow = te->overflow; 1474 if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { 1475 /* 1476 * SDB is already set by hardware. 1477 * Abort and try to set somewhere 1478 * behind. 1479 */ 1480 return false; 1481 } 1482 new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK; 1483 } while (!cmpxchg_double(&te->flags, &te->overflow, 1484 orig_flags, orig_overflow, 1485 new_flags, 0ULL)); 1486 return true; 1487 } 1488 1489 /* 1490 * aux_reset_buffer() - Scan and setup SDBs for new samples 1491 * @aux: The AUX buffer to set 1492 * @range: The range of SDBs to scan started from aux->head 1493 * @overflow: Set to overflow count 1494 * 1495 * Set alert indicator on the SDB at index of aux->alert_mark. If this SDB is 1496 * marked as empty, check if it is already set full by the hardware sampler. 1497 * If yes, that means new data is already there before we can set an alert 1498 * indicator. Caller should try to set alert indicator to some position behind. 1499 * 1500 * Scan the SDBs in AUX buffer from behind aux->empty_mark. They are used 1501 * previously and have already been consumed by user space. Reset these SDBs 1502 * (clear full indicator and alert indicator) for new data. 1503 * If aux->alert_mark fall in this area, just set it. Overflow count is 1504 * recorded while scanning. 1505 * 1506 * SDBs between aux->head and aux->empty_mark are already reset at last time. 1507 * and ready for new samples. So scanning on this area could be skipped. 1508 * 1509 * Return true if alert indicator is set successfully and false if not. 1510 */ 1511 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range, 1512 unsigned long long *overflow) 1513 { 1514 unsigned long long orig_overflow, orig_flags, new_flags; 1515 unsigned long i, range_scan, idx, idx_old; 1516 struct hws_trailer_entry *te; 1517 1518 debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld " 1519 "empty %ld\n", __func__, range, aux->head, 1520 aux->alert_mark, aux->empty_mark); 1521 if (range <= AUX_SDB_NUM_EMPTY(aux)) 1522 /* 1523 * No need to scan. All SDBs in range are marked as empty. 1524 * Just set alert indicator. Should check race with hardware 1525 * sampler. 1526 */ 1527 return aux_set_alert(aux, aux->alert_mark, overflow); 1528 1529 if (aux->alert_mark <= aux->empty_mark) 1530 /* 1531 * Set alert indicator on empty SDB. Should check race 1532 * with hardware sampler. 1533 */ 1534 if (!aux_set_alert(aux, aux->alert_mark, overflow)) 1535 return false; 1536 1537 /* 1538 * Scan the SDBs to clear full and alert indicator used previously. 1539 * Start scanning from one SDB behind empty_mark. If the new alert 1540 * indicator fall into this range, set it. 1541 */ 1542 range_scan = range - AUX_SDB_NUM_EMPTY(aux); 1543 idx_old = idx = aux->empty_mark + 1; 1544 for (i = 0; i < range_scan; i++, idx++) { 1545 te = aux_sdb_trailer(aux, idx); 1546 do { 1547 orig_flags = te->flags; 1548 orig_overflow = te->overflow; 1549 new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK; 1550 if (idx == aux->alert_mark) 1551 new_flags |= SDB_TE_ALERT_REQ_MASK; 1552 else 1553 new_flags &= ~SDB_TE_ALERT_REQ_MASK; 1554 } while (!cmpxchg_double(&te->flags, &te->overflow, 1555 orig_flags, orig_overflow, 1556 new_flags, 0ULL)); 1557 *overflow += orig_overflow; 1558 } 1559 1560 /* Update empty_mark to new position */ 1561 aux->empty_mark = aux->head + range - 1; 1562 1563 debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld " 1564 "empty %ld\n", __func__, range_scan, idx_old, 1565 idx - 1, aux->empty_mark); 1566 return true; 1567 } 1568 1569 /* 1570 * Measurement alert handler for diagnostic mode sampling. 1571 */ 1572 static void hw_collect_aux(struct cpu_hw_sf *cpuhw) 1573 { 1574 struct aux_buffer *aux; 1575 int done = 0; 1576 unsigned long range = 0, size; 1577 unsigned long long overflow = 0; 1578 struct perf_output_handle *handle = &cpuhw->handle; 1579 1580 aux = perf_get_aux(handle); 1581 if (WARN_ON_ONCE(!aux)) 1582 return; 1583 1584 /* Inform user space new data arrived */ 1585 size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; 1586 debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__, 1587 size >> PAGE_SHIFT); 1588 perf_aux_output_end(handle, size); 1589 1590 while (!done) { 1591 /* Get an output handle */ 1592 aux = perf_aux_output_begin(handle, cpuhw->event); 1593 if (handle->size == 0) { 1594 pr_err("The AUX buffer with %lu pages for the " 1595 "diagnostic-sampling mode is full\n", 1596 aux->sfb.num_sdb); 1597 debug_sprintf_event(sfdbg, 1, 1598 "%s: AUX buffer used up\n", 1599 __func__); 1600 break; 1601 } 1602 if (WARN_ON_ONCE(!aux)) 1603 return; 1604 1605 /* Update head and alert_mark to new position */ 1606 aux->head = handle->head >> PAGE_SHIFT; 1607 range = (handle->size + 1) >> PAGE_SHIFT; 1608 if (range == 1) 1609 aux->alert_mark = aux->head; 1610 else 1611 aux->alert_mark = aux->head + range/2 - 1; 1612 1613 if (aux_reset_buffer(aux, range, &overflow)) { 1614 if (!overflow) { 1615 done = 1; 1616 break; 1617 } 1618 size = range << PAGE_SHIFT; 1619 perf_aux_output_end(&cpuhw->handle, size); 1620 pr_err("Sample data caused the AUX buffer with %lu " 1621 "pages to overflow\n", aux->sfb.num_sdb); 1622 debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld " 1623 "overflow %lld\n", __func__, 1624 aux->head, range, overflow); 1625 } else { 1626 size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT; 1627 perf_aux_output_end(&cpuhw->handle, size); 1628 debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld " 1629 "already full, try another\n", 1630 __func__, 1631 aux->head, aux->alert_mark); 1632 } 1633 } 1634 1635 if (done) 1636 debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld " 1637 "empty %ld\n", __func__, aux->head, 1638 aux->alert_mark, aux->empty_mark); 1639 } 1640 1641 /* 1642 * Callback when freeing AUX buffers. 1643 */ 1644 static void aux_buffer_free(void *data) 1645 { 1646 struct aux_buffer *aux = data; 1647 unsigned long i, num_sdbt; 1648 1649 if (!aux) 1650 return; 1651 1652 /* Free SDBT. SDB is freed by the caller */ 1653 num_sdbt = aux->sfb.num_sdbt; 1654 for (i = 0; i < num_sdbt; i++) 1655 free_page(aux->sdbt_index[i]); 1656 1657 kfree(aux->sdbt_index); 1658 kfree(aux->sdb_index); 1659 kfree(aux); 1660 1661 debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt); 1662 } 1663 1664 static void aux_sdb_init(unsigned long sdb) 1665 { 1666 struct hws_trailer_entry *te; 1667 1668 te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb); 1669 1670 /* Save clock base */ 1671 te->clock_base = 1; 1672 memcpy(&te->progusage2, &tod_clock_base[1], 8); 1673 } 1674 1675 /* 1676 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling 1677 * @event: Event the buffer is setup for, event->cpu == -1 means current 1678 * @pages: Array of pointers to buffer pages passed from perf core 1679 * @nr_pages: Total pages 1680 * @snapshot: Flag for snapshot mode 1681 * 1682 * This is the callback when setup an event using AUX buffer. Perf tool can 1683 * trigger this by an additional mmap() call on the event. Unlike the buffer 1684 * for basic samples, AUX buffer belongs to the event. It is scheduled with 1685 * the task among online cpus when it is a per-thread event. 1686 * 1687 * Return the private AUX buffer structure if success or NULL if fails. 1688 */ 1689 static void *aux_buffer_setup(struct perf_event *event, void **pages, 1690 int nr_pages, bool snapshot) 1691 { 1692 struct sf_buffer *sfb; 1693 struct aux_buffer *aux; 1694 unsigned long *new, *tail; 1695 int i, n_sdbt; 1696 1697 if (!nr_pages || !pages) 1698 return NULL; 1699 1700 if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) { 1701 pr_err("AUX buffer size (%i pages) is larger than the " 1702 "maximum sampling buffer limit\n", 1703 nr_pages); 1704 return NULL; 1705 } else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) { 1706 pr_err("AUX buffer size (%i pages) is less than the " 1707 "minimum sampling buffer limit\n", 1708 nr_pages); 1709 return NULL; 1710 } 1711 1712 /* Allocate aux_buffer struct for the event */ 1713 aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL); 1714 if (!aux) 1715 goto no_aux; 1716 sfb = &aux->sfb; 1717 1718 /* Allocate sdbt_index for fast reference */ 1719 n_sdbt = DIV_ROUND_UP(nr_pages, CPUM_SF_SDB_PER_TABLE); 1720 aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL); 1721 if (!aux->sdbt_index) 1722 goto no_sdbt_index; 1723 1724 /* Allocate sdb_index for fast reference */ 1725 aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL); 1726 if (!aux->sdb_index) 1727 goto no_sdb_index; 1728 1729 /* Allocate the first SDBT */ 1730 sfb->num_sdbt = 0; 1731 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1732 if (!sfb->sdbt) 1733 goto no_sdbt; 1734 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt; 1735 tail = sfb->tail = sfb->sdbt; 1736 1737 /* 1738 * Link the provided pages of AUX buffer to SDBT. 1739 * Allocate SDBT if needed. 1740 */ 1741 for (i = 0; i < nr_pages; i++, tail++) { 1742 if (require_table_link(tail)) { 1743 new = (unsigned long *) get_zeroed_page(GFP_KERNEL); 1744 if (!new) 1745 goto no_sdbt; 1746 aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new; 1747 /* Link current page to tail of chain */ 1748 *tail = (unsigned long)(void *) new + 1; 1749 tail = new; 1750 } 1751 /* Tail is the entry in a SDBT */ 1752 *tail = (unsigned long)pages[i]; 1753 aux->sdb_index[i] = (unsigned long)pages[i]; 1754 aux_sdb_init((unsigned long)pages[i]); 1755 } 1756 sfb->num_sdb = nr_pages; 1757 1758 /* Link the last entry in the SDBT to the first SDBT */ 1759 *tail = (unsigned long) sfb->sdbt + 1; 1760 sfb->tail = tail; 1761 1762 /* 1763 * Initial all SDBs are zeroed. Mark it as empty. 1764 * So there is no need to clear the full indicator 1765 * when this event is first added. 1766 */ 1767 aux->empty_mark = sfb->num_sdb - 1; 1768 1769 debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__, 1770 sfb->num_sdbt, sfb->num_sdb); 1771 1772 return aux; 1773 1774 no_sdbt: 1775 /* SDBs (AUX buffer pages) are freed by caller */ 1776 for (i = 0; i < sfb->num_sdbt; i++) 1777 free_page(aux->sdbt_index[i]); 1778 kfree(aux->sdb_index); 1779 no_sdb_index: 1780 kfree(aux->sdbt_index); 1781 no_sdbt_index: 1782 kfree(aux); 1783 no_aux: 1784 return NULL; 1785 } 1786 1787 static void cpumsf_pmu_read(struct perf_event *event) 1788 { 1789 /* Nothing to do ... updates are interrupt-driven */ 1790 } 1791 1792 /* Check if the new sampling period/freqeuncy is appropriate. 1793 * 1794 * Return non-zero on error and zero on passed checks. 1795 */ 1796 static int cpumsf_pmu_check_period(struct perf_event *event, u64 value) 1797 { 1798 struct hws_qsi_info_block si; 1799 unsigned long rate; 1800 bool do_freq; 1801 1802 memset(&si, 0, sizeof(si)); 1803 if (event->cpu == -1) { 1804 if (qsi(&si)) 1805 return -ENODEV; 1806 } else { 1807 /* Event is pinned to a particular CPU, retrieve the per-CPU 1808 * sampling structure for accessing the CPU-specific QSI. 1809 */ 1810 struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu); 1811 1812 si = cpuhw->qsi; 1813 } 1814 1815 do_freq = !!SAMPLE_FREQ_MODE(&event->hw); 1816 rate = getrate(do_freq, value, &si); 1817 if (!rate) 1818 return -EINVAL; 1819 1820 event->attr.sample_period = rate; 1821 SAMPL_RATE(&event->hw) = rate; 1822 hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); 1823 debug_sprintf_event(sfdbg, 4, "%s:" 1824 " cpu %d value %#llx period %#llx freq %d\n", 1825 __func__, event->cpu, value, 1826 event->attr.sample_period, do_freq); 1827 return 0; 1828 } 1829 1830 /* Activate sampling control. 1831 * Next call of pmu_enable() starts sampling. 1832 */ 1833 static void cpumsf_pmu_start(struct perf_event *event, int flags) 1834 { 1835 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1836 1837 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) 1838 return; 1839 1840 if (flags & PERF_EF_RELOAD) 1841 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); 1842 1843 perf_pmu_disable(event->pmu); 1844 event->hw.state = 0; 1845 cpuhw->lsctl.cs = 1; 1846 if (SAMPL_DIAG_MODE(&event->hw)) 1847 cpuhw->lsctl.cd = 1; 1848 perf_pmu_enable(event->pmu); 1849 } 1850 1851 /* Deactivate sampling control. 1852 * Next call of pmu_enable() stops sampling. 1853 */ 1854 static void cpumsf_pmu_stop(struct perf_event *event, int flags) 1855 { 1856 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1857 1858 if (event->hw.state & PERF_HES_STOPPED) 1859 return; 1860 1861 perf_pmu_disable(event->pmu); 1862 cpuhw->lsctl.cs = 0; 1863 cpuhw->lsctl.cd = 0; 1864 event->hw.state |= PERF_HES_STOPPED; 1865 1866 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { 1867 hw_perf_event_update(event, 1); 1868 event->hw.state |= PERF_HES_UPTODATE; 1869 } 1870 perf_pmu_enable(event->pmu); 1871 } 1872 1873 static int cpumsf_pmu_add(struct perf_event *event, int flags) 1874 { 1875 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1876 struct aux_buffer *aux; 1877 int err; 1878 1879 if (cpuhw->flags & PMU_F_IN_USE) 1880 return -EAGAIN; 1881 1882 if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt) 1883 return -EINVAL; 1884 1885 err = 0; 1886 perf_pmu_disable(event->pmu); 1887 1888 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; 1889 1890 /* Set up sampling controls. Always program the sampling register 1891 * using the SDB-table start. Reset TEAR_REG event hardware register 1892 * that is used by hw_perf_event_update() to store the sampling buffer 1893 * position after samples have been flushed. 1894 */ 1895 cpuhw->lsctl.s = 0; 1896 cpuhw->lsctl.h = 1; 1897 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); 1898 if (!SAMPL_DIAG_MODE(&event->hw)) { 1899 cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; 1900 cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; 1901 TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt; 1902 } 1903 1904 /* Ensure sampling functions are in the disabled state. If disabled, 1905 * switch on sampling enable control. */ 1906 if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { 1907 err = -EAGAIN; 1908 goto out; 1909 } 1910 if (SAMPL_DIAG_MODE(&event->hw)) { 1911 aux = perf_aux_output_begin(&cpuhw->handle, event); 1912 if (!aux) { 1913 err = -EINVAL; 1914 goto out; 1915 } 1916 err = aux_output_begin(&cpuhw->handle, aux, cpuhw); 1917 if (err) 1918 goto out; 1919 cpuhw->lsctl.ed = 1; 1920 } 1921 cpuhw->lsctl.es = 1; 1922 1923 /* Set in_use flag and store event */ 1924 cpuhw->event = event; 1925 cpuhw->flags |= PMU_F_IN_USE; 1926 1927 if (flags & PERF_EF_START) 1928 cpumsf_pmu_start(event, PERF_EF_RELOAD); 1929 out: 1930 perf_event_update_userpage(event); 1931 perf_pmu_enable(event->pmu); 1932 return err; 1933 } 1934 1935 static void cpumsf_pmu_del(struct perf_event *event, int flags) 1936 { 1937 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1938 1939 perf_pmu_disable(event->pmu); 1940 cpumsf_pmu_stop(event, PERF_EF_UPDATE); 1941 1942 cpuhw->lsctl.es = 0; 1943 cpuhw->lsctl.ed = 0; 1944 cpuhw->flags &= ~PMU_F_IN_USE; 1945 cpuhw->event = NULL; 1946 1947 if (SAMPL_DIAG_MODE(&event->hw)) 1948 aux_output_end(&cpuhw->handle); 1949 perf_event_update_userpage(event); 1950 perf_pmu_enable(event->pmu); 1951 } 1952 1953 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); 1954 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); 1955 1956 /* Attribute list for CPU_SF. 1957 * 1958 * The availablitiy depends on the CPU_MF sampling facility authorization 1959 * for basic + diagnositic samples. This is determined at initialization 1960 * time by the sampling facility device driver. 1961 * If the authorization for basic samples is turned off, it should be 1962 * also turned off for diagnostic sampling. 1963 * 1964 * During initialization of the device driver, check the authorization 1965 * level for diagnostic sampling and installs the attribute 1966 * file for diagnostic sampling if necessary. 1967 * 1968 * For now install a placeholder to reference all possible attributes: 1969 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG. 1970 * Add another entry for the final NULL pointer. 1971 */ 1972 enum { 1973 SF_CYCLES_BASIC_ATTR_IDX = 0, 1974 SF_CYCLES_BASIC_DIAG_ATTR_IDX, 1975 SF_CYCLES_ATTR_MAX 1976 }; 1977 1978 static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = { 1979 [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC) 1980 }; 1981 1982 PMU_FORMAT_ATTR(event, "config:0-63"); 1983 1984 static struct attribute *cpumsf_pmu_format_attr[] = { 1985 &format_attr_event.attr, 1986 NULL, 1987 }; 1988 1989 static struct attribute_group cpumsf_pmu_events_group = { 1990 .name = "events", 1991 .attrs = cpumsf_pmu_events_attr, 1992 }; 1993 1994 static struct attribute_group cpumsf_pmu_format_group = { 1995 .name = "format", 1996 .attrs = cpumsf_pmu_format_attr, 1997 }; 1998 1999 static const struct attribute_group *cpumsf_pmu_attr_groups[] = { 2000 &cpumsf_pmu_events_group, 2001 &cpumsf_pmu_format_group, 2002 NULL, 2003 }; 2004 2005 static struct pmu cpumf_sampling = { 2006 .pmu_enable = cpumsf_pmu_enable, 2007 .pmu_disable = cpumsf_pmu_disable, 2008 2009 .event_init = cpumsf_pmu_event_init, 2010 .add = cpumsf_pmu_add, 2011 .del = cpumsf_pmu_del, 2012 2013 .start = cpumsf_pmu_start, 2014 .stop = cpumsf_pmu_stop, 2015 .read = cpumsf_pmu_read, 2016 2017 .attr_groups = cpumsf_pmu_attr_groups, 2018 2019 .setup_aux = aux_buffer_setup, 2020 .free_aux = aux_buffer_free, 2021 2022 .check_period = cpumsf_pmu_check_period, 2023 }; 2024 2025 static void cpumf_measurement_alert(struct ext_code ext_code, 2026 unsigned int alert, unsigned long unused) 2027 { 2028 struct cpu_hw_sf *cpuhw; 2029 2030 if (!(alert & CPU_MF_INT_SF_MASK)) 2031 return; 2032 inc_irq_stat(IRQEXT_CMS); 2033 cpuhw = this_cpu_ptr(&cpu_hw_sf); 2034 2035 /* Measurement alerts are shared and might happen when the PMU 2036 * is not reserved. Ignore these alerts in this case. */ 2037 if (!(cpuhw->flags & PMU_F_RESERVED)) 2038 return; 2039 2040 /* The processing below must take care of multiple alert events that 2041 * might be indicated concurrently. */ 2042 2043 /* Program alert request */ 2044 if (alert & CPU_MF_INT_SF_PRA) { 2045 if (cpuhw->flags & PMU_F_IN_USE) 2046 if (SAMPL_DIAG_MODE(&cpuhw->event->hw)) 2047 hw_collect_aux(cpuhw); 2048 else 2049 hw_perf_event_update(cpuhw->event, 0); 2050 else 2051 WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE)); 2052 } 2053 2054 /* Report measurement alerts only for non-PRA codes */ 2055 if (alert != CPU_MF_INT_SF_PRA) 2056 debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__, 2057 alert); 2058 2059 /* Sampling authorization change request */ 2060 if (alert & CPU_MF_INT_SF_SACA) 2061 qsi(&cpuhw->qsi); 2062 2063 /* Loss of sample data due to high-priority machine activities */ 2064 if (alert & CPU_MF_INT_SF_LSDA) { 2065 pr_err("Sample data was lost\n"); 2066 cpuhw->flags |= PMU_F_ERR_LSDA; 2067 sf_disable(); 2068 } 2069 2070 /* Invalid sampling buffer entry */ 2071 if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) { 2072 pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n", 2073 alert); 2074 cpuhw->flags |= PMU_F_ERR_IBE; 2075 sf_disable(); 2076 } 2077 } 2078 2079 static int cpusf_pmu_setup(unsigned int cpu, int flags) 2080 { 2081 /* Ignore the notification if no events are scheduled on the PMU. 2082 * This might be racy... 2083 */ 2084 if (!atomic_read(&num_events)) 2085 return 0; 2086 2087 local_irq_disable(); 2088 setup_pmc_cpu(&flags); 2089 local_irq_enable(); 2090 return 0; 2091 } 2092 2093 static int s390_pmu_sf_online_cpu(unsigned int cpu) 2094 { 2095 return cpusf_pmu_setup(cpu, PMC_INIT); 2096 } 2097 2098 static int s390_pmu_sf_offline_cpu(unsigned int cpu) 2099 { 2100 return cpusf_pmu_setup(cpu, PMC_RELEASE); 2101 } 2102 2103 static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) 2104 { 2105 if (!cpum_sf_avail()) 2106 return -ENODEV; 2107 return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 2108 } 2109 2110 static int param_set_sfb_size(const char *val, const struct kernel_param *kp) 2111 { 2112 int rc; 2113 unsigned long min, max; 2114 2115 if (!cpum_sf_avail()) 2116 return -ENODEV; 2117 if (!val || !strlen(val)) 2118 return -EINVAL; 2119 2120 /* Valid parameter values: "min,max" or "max" */ 2121 min = CPUM_SF_MIN_SDB; 2122 max = CPUM_SF_MAX_SDB; 2123 if (strchr(val, ',')) 2124 rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL; 2125 else 2126 rc = kstrtoul(val, 10, &max); 2127 2128 if (min < 2 || min >= max || max > get_num_physpages()) 2129 rc = -EINVAL; 2130 if (rc) 2131 return rc; 2132 2133 sfb_set_limits(min, max); 2134 pr_info("The sampling buffer limits have changed to: " 2135 "min %lu max %lu (diag %lu)\n", 2136 CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR); 2137 return 0; 2138 } 2139 2140 #define param_check_sfb_size(name, p) __param_check(name, p, void) 2141 static const struct kernel_param_ops param_ops_sfb_size = { 2142 .set = param_set_sfb_size, 2143 .get = param_get_sfb_size, 2144 }; 2145 2146 #define RS_INIT_FAILURE_QSI 0x0001 2147 #define RS_INIT_FAILURE_BSDES 0x0002 2148 #define RS_INIT_FAILURE_ALRT 0x0003 2149 #define RS_INIT_FAILURE_PERF 0x0004 2150 static void __init pr_cpumsf_err(unsigned int reason) 2151 { 2152 pr_err("Sampling facility support for perf is not available: " 2153 "reason %#x\n", reason); 2154 } 2155 2156 static int __init init_cpum_sampling_pmu(void) 2157 { 2158 struct hws_qsi_info_block si; 2159 int err; 2160 2161 if (!cpum_sf_avail()) 2162 return -ENODEV; 2163 2164 memset(&si, 0, sizeof(si)); 2165 if (qsi(&si)) { 2166 pr_cpumsf_err(RS_INIT_FAILURE_QSI); 2167 return -ENODEV; 2168 } 2169 2170 if (!si.as && !si.ad) 2171 return -ENODEV; 2172 2173 if (si.bsdes != sizeof(struct hws_basic_entry)) { 2174 pr_cpumsf_err(RS_INIT_FAILURE_BSDES); 2175 return -EINVAL; 2176 } 2177 2178 if (si.ad) { 2179 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); 2180 /* Sampling of diagnostic data authorized, 2181 * install event into attribute list of PMU device. 2182 */ 2183 cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] = 2184 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); 2185 } 2186 2187 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80); 2188 if (!sfdbg) { 2189 pr_err("Registering for s390dbf failed\n"); 2190 return -ENOMEM; 2191 } 2192 debug_register_view(sfdbg, &debug_sprintf_view); 2193 2194 err = register_external_irq(EXT_IRQ_MEASURE_ALERT, 2195 cpumf_measurement_alert); 2196 if (err) { 2197 pr_cpumsf_err(RS_INIT_FAILURE_ALRT); 2198 debug_unregister(sfdbg); 2199 goto out; 2200 } 2201 2202 err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW); 2203 if (err) { 2204 pr_cpumsf_err(RS_INIT_FAILURE_PERF); 2205 unregister_external_irq(EXT_IRQ_MEASURE_ALERT, 2206 cpumf_measurement_alert); 2207 debug_unregister(sfdbg); 2208 goto out; 2209 } 2210 2211 cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online", 2212 s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu); 2213 out: 2214 return err; 2215 } 2216 2217 arch_initcall(init_cpum_sampling_pmu); 2218 core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640); 2219