// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for the System z CPU-measurement Sampling Facility
 *
 * Copyright IBM Corp. 2013, 2018
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT	"cpum_sf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/pid.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/cpu_mf.h>
#include <asm/irq.h>
#include <asm/debug.h>
#include <asm/timex.h>

/* Minimum number of sample-data-block-tables:
 * At least one table is required for the sampling buffer structure.
 * A single table contains up to 511 pointers to sample-data-blocks.
 */
#define CPUM_SF_MIN_SDBT	1

/* Number of sample-data-blocks per sample-data-block-table (SDBT):
 * A table contains SDB pointers (8 bytes) and one table-link entry
 * that points to the origin of the next SDBT.
 */
#define CPUM_SF_SDB_PER_TABLE	((PAGE_SIZE - 8) / 8)

/* Maximum page offset for an SDBT table-link entry:
 * If this page offset is reached, a table-link entry to the next SDBT
 * must be added.
 */
#define CPUM_SF_SDBT_TL_OFFSET	(CPUM_SF_SDB_PER_TABLE * 8)
static inline int require_table_link(const void *sdbt)
{
	return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
}
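
/*
 * Illustration of the table-link encoding used throughout this file
 * (a sketch derived from the code below; is_link_entry() and
 * get_next_sdbt() are provided by asm/cpu_mf.h):
 *
 *	*tail = (unsigned long) next_sdbt + 1;	- low bit marks a link entry
 *	next  = get_next_sdbt(tail);		- strips the mark again
 *
 * An SDBT is one 4KB page holding up to 511 SDB origin pointers plus one
 * table-link entry at offset CPUM_SF_SDBT_TL_OFFSET that points to the
 * next SDBT (or back to the first one, closing the ring).
 */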

/* Minimum and maximum sampling buffer sizes:
 *
 * This number represents the maximum size of the sampling buffer taking
 * the number of sample-data-block-tables into account.  Note that these
 * numbers apply to the basic-sampling function only.
 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
 * the diagnostic-sampling function is active.
 *
 * Sampling buffer size		Buffer characteristics
 * ---------------------------------------------------
 *	64KB	==    16 pages (4KB per page)
 *			 1 page  for SDB-tables
 *			15 pages for SDBs
 *
 *	32MB	==  8192 pages (4KB per page)
 *			  16 pages for SDB-tables
 *			8176 pages for SDBs
 */
static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;

struct sf_buffer {
	unsigned long	*sdbt;	    /* Sample-data-block-table origin */
	/* buffer characteristics (required for buffer increments) */
	unsigned long	num_sdb;    /* Number of sample-data-blocks */
	unsigned long	num_sdbt;   /* Number of sample-data-block-tables */
	unsigned long	*tail;	    /* last sample-data-block-table */
};

struct aux_buffer {
	struct sf_buffer sfb;
	unsigned long head;	   /* index of SDB of buffer head */
	unsigned long alert_mark;  /* index of SDB of alert request position */
	unsigned long empty_mark;  /* mark of SDB not marked full */
	unsigned long *sdb_index;  /* SDB address for fast lookup */
	unsigned long *sdbt_index; /* SDBT address for fast lookup */
};

struct cpu_hw_sf {
	/* CPU-measurement sampling information block */
	struct hws_qsi_info_block qsi;
	/* CPU-measurement sampling control block */
	struct hws_lsctl_request_block lsctl;
	struct sf_buffer sfb;	    /* Sampling buffer */
	unsigned int flags;	    /* Status flags */
	struct perf_event *event;   /* Scheduled perf event */
	struct perf_output_handle handle; /* AUX buffer output handle */
};
static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);

/* Debug feature */
static debug_info_t *sfdbg;

/*
 * sf_disable() - Switch off sampling facility
 */
static int sf_disable(void)
{
	struct hws_lsctl_request_block sreq;

	memset(&sreq, 0, sizeof(sreq));
	return lsctl(&sreq);
}

/*
 * sf_buffer_available() - Check for an allocated sampling buffer
 */
static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
{
	return !!cpuhw->sfb.sdbt;
}

/*
 * deallocate sampling facility buffer
 */
static void free_sampling_buffer(struct sf_buffer *sfb)
{
	unsigned long *sdbt, *curr;

	if (!sfb->sdbt)
		return;

	sdbt = sfb->sdbt;
	curr = sdbt;

	/* Free the SDBT after all SDBs are processed... */
	while (1) {
		if (!*curr || !sdbt)
			break;

		/* Process table-link entries */
		if (is_link_entry(curr)) {
			curr = get_next_sdbt(curr);
			if (sdbt)
				free_page((unsigned long) sdbt);

			/* If the origin is reached, sampling buffer is freed */
			if (curr == sfb->sdbt)
				break;
			else
				sdbt = curr;
		} else {
			/* Process SDB pointer */
			if (*curr) {
				free_page(*curr);
				curr++;
			}
		}
	}

	debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__,
			    (unsigned long)sfb->sdbt);
	memset(sfb, 0, sizeof(*sfb));
}

static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
{
	unsigned long sdb, *trailer;

	/* Allocate and initialize sample-data-block */
	sdb = get_zeroed_page(gfp_flags);
	if (!sdb)
		return -ENOMEM;
	trailer = trailer_entry_ptr(sdb);
	*trailer = SDB_TE_ALERT_REQ_MASK;

	/* Link SDB into the sample-data-block-table */
	*sdbt = sdb;

	return 0;
}
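
/*
 * Note on the trailer initialization above (an interpretation of the
 * control flow in this file): setting SDB_TE_ALERT_REQ_MASK in a fresh
 * SDB's trailer entry requests a measurement alert from the sampling
 * facility once that SDB is full.  The interrupt handler path re-arms
 * the indicator after harvesting a block, see the trailer reset in
 * hw_perf_event_update() below.
 */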

/*
 * realloc_sampling_buffer() - extend sampler memory
 *
 * Allocates new sample-data-blocks and adds them to the specified sampling
 * buffer memory.
 *
 * Important: This modifies the sampling buffer and must be called when the
 *	      sampling facility is disabled.
 *
 * Returns zero on success, non-zero otherwise.
 */
static int realloc_sampling_buffer(struct sf_buffer *sfb,
				   unsigned long num_sdb, gfp_t gfp_flags)
{
	int i, rc;
	unsigned long *new, *tail, *tail_prev = NULL;

	if (!sfb->sdbt || !sfb->tail)
		return -EINVAL;

	if (!is_link_entry(sfb->tail))
		return -EINVAL;

	/* Append to the existing sampling buffer, overwriting the table-link
	 * register.
	 * The tail variable always points to the "tail" (last and table-link)
	 * entry in an SDB-table.
	 */
	tail = sfb->tail;

	/* Do a sanity check whether the table-link entry points to
	 * the sampling buffer origin.
	 */
	if (sfb->sdbt != get_next_sdbt(tail)) {
		debug_sprintf_event(sfdbg, 3, "%s: "
				    "sampling buffer is not linked: origin %#lx"
				    " tail %#lx\n", __func__,
				    (unsigned long)sfb->sdbt,
				    (unsigned long)tail);
		return -EINVAL;
	}

	/* Allocate remaining SDBs */
	rc = 0;
	for (i = 0; i < num_sdb; i++) {
		/* Allocate a new SDB-table if it is full. */
		if (require_table_link(tail)) {
			new = (unsigned long *) get_zeroed_page(gfp_flags);
			if (!new) {
				rc = -ENOMEM;
				break;
			}
			sfb->num_sdbt++;
			/* Link current page to tail of chain */
			*tail = (unsigned long)(void *) new + 1;
			tail_prev = tail;
			tail = new;
		}

		/* Allocate a new sample-data-block.
		 * If there is not enough memory, stop the realloc process
		 * and simply use what was allocated.  If this is a temporary
		 * issue, a new realloc call (if required) might succeed.
		 */
		rc = alloc_sample_data_block(tail, gfp_flags);
		if (rc) {
			/* Undo last SDBT.  An SDBT with no SDB at its first
			 * entry but with an SDBT entry instead cannot be
			 * handled by the interrupt handler code.
			 * Avoid this situation.
			 */
			if (tail_prev) {
				sfb->num_sdbt--;
				free_page((unsigned long) new);
				tail = tail_prev;
			}
			break;
		}
		sfb->num_sdb++;
		tail++;
		tail_prev = new = NULL;	/* Allocated at least one SDB */
	}

	/* Link sampling buffer to its origin */
	*tail = (unsigned long) sfb->sdbt + 1;
	sfb->tail = tail;

	debug_sprintf_event(sfdbg, 4, "%s: new buffer"
			    " settings: sdbt %lu sdb %lu\n", __func__,
			    sfb->num_sdbt, sfb->num_sdb);
	return rc;
}

/*
 * allocate_sampling_buffer() - allocate sampler memory
 *
 * Allocates and initializes a sampling buffer structure using the
 * specified number of sample-data-blocks (SDB).  For each allocation,
 * a 4K page is used.  The number of sample-data-block-tables (SDBT)
 * is calculated from the number of SDBs.
 * Also set the ALERT_REQ mask in each SDB's trailer.
 *
 * Returns zero on success, non-zero otherwise.
 */
static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
{
	int rc;

	if (sfb->sdbt)
		return -EINVAL;

	/* Allocate the sample-data-block-table origin */
	sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!sfb->sdbt)
		return -ENOMEM;
	sfb->num_sdb = 0;
	sfb->num_sdbt = 1;

	/* Link the table origin to point to itself to prepare for
	 * realloc_sampling_buffer() invocation.
	 */
	sfb->tail = sfb->sdbt;
	*sfb->tail = (unsigned long)(void *) sfb->sdbt + 1;

	/* Allocate requested number of sample-data-blocks */
	rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
	if (rc) {
		free_sampling_buffer(sfb);
		debug_sprintf_event(sfdbg, 4, "%s: "
			"realloc_sampling_buffer failed with rc %i\n",
			__func__, rc);
	} else
		debug_sprintf_event(sfdbg, 4,
			"%s: tear %#lx dear %#lx\n", __func__,
			(unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt);
	return rc;
}

static void sfb_set_limits(unsigned long min, unsigned long max)
{
	struct hws_qsi_info_block si;

	CPUM_SF_MIN_SDB = min;
	CPUM_SF_MAX_SDB = max;

	memset(&si, 0, sizeof(si));
	if (!qsi(&si))
		CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
}
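
/*
 * Worked example for sfb_set_limits() with assumed entry sizes: if the
 * QSI reports a basic entry size bsdes == 32 bytes and a diagnostic
 * entry size dsdes == 96 bytes (illustrative values, not taken from a
 * particular machine), then CPUM_SF_SDB_DIAG_FACTOR becomes
 * DIV_ROUND_UP(96, 32) == 3, and sfb_max_limit() below allows three
 * times as many SDBs in diagnostic-sampling mode.
 */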

static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
{
	return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
				    : CPUM_SF_MAX_SDB;
}

static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
					struct hw_perf_event *hwc)
{
	if (!sfb->sdbt)
		return SFB_ALLOC_REG(hwc);
	if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
		return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
	return 0;
}

static int sfb_has_pending_allocs(struct sf_buffer *sfb,
				  struct hw_perf_event *hwc)
{
	return sfb_pending_allocs(sfb, hwc) > 0;
}

static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
{
	/* Limit the number of SDBs to not exceed the maximum */
	num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
	if (num)
		SFB_ALLOC_REG(hwc) += num;
}

static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
{
	SFB_ALLOC_REG(hwc) = 0;
	sfb_account_allocs(num, hwc);
}

static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
{
	if (cpuhw->sfb.sdbt)
		free_sampling_buffer(&cpuhw->sfb);
}
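
/*
 * Example of the pending-allocation accounting above (illustrative
 * numbers): with SFB_ALLOC_REG(hwc) == 64 SDBs accounted but only
 * sfb->num_sdb == 48 actually allocated, sfb_pending_allocs() returns
 * 16, and cpumsf_pmu_enable() later grows the buffer by that amount
 * via extend_sampling_buffer().
 */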

static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
	unsigned long n_sdb, freq;
	size_t sample_size;

	/* Calculate sampling buffers using 4K pages
	 *
	 * 1. The sampling size is 32 bytes for basic sampling.  This size
	 *    is the same for all machine types.  Diagnostic sampling uses
	 *    the auxiliary data buffer setup, which provides the memory
	 *    for SDBs using the common Linux AUX trace setup.
	 *
	 * 2. Function alloc_sampling_buffer() sets the Alert Request
	 *    Control indicator to trigger a measurement-alert to harvest
	 *    sample-data-blocks (SDB).  This is done per SDB.  This
	 *    measurement alert interrupt fires quickly enough to handle
	 *    one SDB; on very high frequencies and work loads there might
	 *    be 2 to 3 SDBs available for sample processing.
	 *    Currently there is no need to set up an alert request on every
	 *    n-th page.  This would be counterproductive, as a single IRQ
	 *    would then have to process a very high number of samples.
	 *
	 * 3. Use the sampling frequency as input.
	 *    Compute the number of SDBs and ensure a minimum
	 *    of CPUM_SF_MIN_SDB.  Depending on the frequency add some more
	 *    SDBs to handle a higher sampling rate.
	 *    Use a minimum of CPUM_SF_MIN_SDB and allow for 100 samples
	 *    (one SDB) for every 10000 HZ frequency increment.
	 *
	 * 4. Compute the number of sample-data-block-tables (SDBT) and
	 *    ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
	 *    to 511 SDBs).
	 */
	sample_size = sizeof(struct hws_basic_entry);
	freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
	n_sdb = CPUM_SF_MIN_SDB + DIV_ROUND_UP(freq, 10000);

	/* If there is already a sampling buffer allocated, it is very likely
	 * that the sampling facility is enabled too.  If the event to be
	 * initialized requires a greater sampling buffer, the allocation must
	 * be postponed.  Changing the sampling buffer requires the sampling
	 * facility to be in the disabled state.  So, account the number of
	 * required SDBs and let cpumsf_pmu_enable() resize the buffer just
	 * before the event is started.
	 */
	sfb_init_allocs(n_sdb, hwc);
	if (sf_buffer_available(cpuhw))
		return 0;

	debug_sprintf_event(sfdbg, 3,
			    "%s: rate %lu f %lu sdb %lu/%lu"
			    " sample_size %lu cpuhw %p\n", __func__,
			    SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
			    sample_size, cpuhw);

	return alloc_sampling_buffer(&cpuhw->sfb,
				     sfb_pending_allocs(&cpuhw->sfb, hwc));
}

static unsigned long min_percent(unsigned int percent, unsigned long base,
				 unsigned long min)
{
	return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
}

static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
{
	/* Use a percentage-based approach to extend the sampling facility
	 * buffer.  Accept up to 5% sample data loss.
	 * Vary the extents between 1% to 5% of the current number of
	 * sample-data-blocks.
	 */
	if (ratio <= 5)
		return 0;
	if (ratio <= 25)
		return min_percent(1, base, 1);
	if (ratio <= 50)
		return min_percent(1, base, 1);
	if (ratio <= 75)
		return min_percent(2, base, 2);
	if (ratio <= 100)
		return min_percent(3, base, 3);
	if (ratio <= 250)
		return min_percent(4, base, 4);

	return min_percent(5, base, 8);
}
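
/*
 * Worked example for compute_sfb_extent() (illustrative numbers): with
 * base == 400 SDBs and a loss ratio of 80%, the result is
 * min_percent(3, 400, 3) == min_t(unsigned long, 3,
 * DIV_ROUND_UP(3 * 400, 100)) == min(3, 12) == 3, i.e. the extent is
 * capped at the third argument for large buffers.
 */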

static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
				  struct hw_perf_event *hwc)
{
	unsigned long ratio, num;

	if (!OVERFLOW_REG(hwc))
		return;

	/* The sample_overflow contains the average number of sample data
	 * entries that have been lost because sample-data-blocks were full.
	 *
	 * Calculate the total number of sample data entries that have been
	 * discarded.  Then calculate the ratio of lost samples to total samples
	 * per second in percent.
	 */
	ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
			     sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));

	/* Compute number of sample-data-blocks */
	num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
	if (num)
		sfb_account_allocs(num, hwc);

	debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n",
			    __func__, OVERFLOW_REG(hwc), ratio, num);
	OVERFLOW_REG(hwc) = 0;
}

/* extend_sampling_buffer() - Extend sampling buffer
 * @sfb:	Sampling buffer structure (for local CPU)
 * @hwc:	Perf event hardware structure
 *
 * Use this function to extend the sampling buffer based on the overflow counter
 * and postponed allocation extents stored in the specified Perf event hardware.
 *
 * Important: This function disables the sampling facility in order to safely
 *	      change the sampling buffer structure.  Do not call this function
 *	      when the PMU is active.
 */
static void extend_sampling_buffer(struct sf_buffer *sfb,
				   struct hw_perf_event *hwc)
{
	unsigned long num, num_old;
	int rc;

	num = sfb_pending_allocs(sfb, hwc);
	if (!num)
		return;
	num_old = sfb->num_sdb;

	/* Disable the sampling facility to reset any states and also
	 * clear pending measurement alerts.
	 */
	sf_disable();

	/* Extend the sampling buffer.
	 * This memory allocation typically happens in an atomic context when
	 * called by perf.  Because this is a reallocation, it is fine if the
	 * new SDB-request cannot be satisfied immediately.
	 */
	rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
	if (rc)
		debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n",
				    __func__, rc);

	if (sfb_has_pending_allocs(sfb, hwc))
		debug_sprintf_event(sfdbg, 5, "%s: "
				    "req %lu alloc %lu remaining %lu\n",
				    __func__, num, sfb->num_sdb - num_old,
				    sfb_pending_allocs(sfb, hwc));
}

/* Number of perf events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

#define PMC_INIT      0
#define PMC_RELEASE   1
#define PMC_FAILURE   2
static void setup_pmc_cpu(void *flags)
{
	int err;
	struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);

	err = 0;
	switch (*((int *) flags)) {
	case PMC_INIT:
		memset(cpusf, 0, sizeof(*cpusf));
		err = qsi(&cpusf->qsi);
		if (err)
			break;
		cpusf->flags |= PMU_F_RESERVED;
		err = sf_disable();
		if (err)
			pr_err("Switching off the sampling facility failed "
			       "with rc %i\n", err);
		debug_sprintf_event(sfdbg, 5,
				    "%s: initialized: cpuhw %p\n", __func__,
				    cpusf);
		break;
	case PMC_RELEASE:
		cpusf->flags &= ~PMU_F_RESERVED;
		err = sf_disable();
		if (err) {
			pr_err("Switching off the sampling facility failed "
			       "with rc %i\n", err);
		} else
			deallocate_buffers(cpusf);
		debug_sprintf_event(sfdbg, 5,
				    "%s: released: cpuhw %p\n", __func__,
				    cpusf);
		break;
	}
	if (err)
		*((int *) flags) |= PMC_FAILURE;
}

static void release_pmc_hardware(void)
{
	int flags = PMC_RELEASE;

	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	on_each_cpu(setup_pmc_cpu, &flags, 1);
}

static int reserve_pmc_hardware(void)
{
	int flags = PMC_INIT;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	if (flags & PMC_FAILURE) {
		release_pmc_hardware();
		return -ENODEV;
	}
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Release PMC if this is the last perf event */
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static void hw_init_period(struct hw_perf_event *hwc, u64 period)
{
	hwc->sample_period = period;
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);
}

static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
				   unsigned long rate)
{
	return clamp_t(unsigned long, rate,
		       si->min_sampl_rate, si->max_sampl_rate);
}
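
/*
 * Example for hw_limit_rate() with assumed QSI limits: if
 * si->min_sampl_rate == 3000 and si->max_sampl_rate == 80000000
 * (illustrative machine values), a requested period of 1000 is raised
 * to 3000 and a period of 100000000 is capped at 80000000.
 */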

static u32 cpumsf_pid_type(struct perf_event *event,
			   u32 pid, enum pid_type type)
{
	struct task_struct *tsk;

	/* Idle process */
	if (!pid)
		goto out;

	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	pid = -1;
	if (tsk) {
		/*
		 * Only top level events contain the pid namespace in which
		 * they are created.
		 */
		if (event->parent)
			event = event->parent;
		pid = __task_pid_nr_ns(tsk, type, event->ns);
		/*
		 * See also 1d953111b648
		 * "perf/core: Don't report zero PIDs for exiting tasks".
		 */
		if (!pid && !pid_alive(tsk))
			pid = -1;
	}
out:
	return pid;
}

static void cpumsf_output_event_pid(struct perf_event *event,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	u32 pid;
	struct perf_event_header header;
	struct perf_output_handle handle;

	/*
	 * Obtain the PID from the basic-sampling data entry and
	 * correct the data->tid_entry.pid value.
	 */
	pid = data->tid_entry.pid;

	/* Protect callchain buffers, tasks */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);
	if (perf_output_begin(&handle, event, header.size))
		goto out;

	/* Update the process ID (see also kernel/events/core.c) */
	data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
	data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);

	perf_output_sample(&handle, &header, data, event);
	perf_output_end(&handle);
out:
	rcu_read_unlock();
}

static unsigned long getrate(bool freq, unsigned long sample,
			     struct hws_qsi_info_block *si)
{
	unsigned long rate;

	if (freq) {
		rate = freq_to_sample_rate(si, sample);
		rate = hw_limit_rate(si, rate);
	} else {
		/* The min/max sampling rates specify the valid range
		 * of sample periods.  If the specified sample period is
		 * out of range, limit the period to the range boundary.
		 */
		rate = hw_limit_rate(si, sample);

		/* The perf core maintains a maximum sample rate that is
		 * configurable through the sysctl interface.  Ensure the
		 * sampling rate does not exceed this value.  This also helps
		 * to avoid throttling when pushing samples with
		 * perf_event_overflow().
		 */
		if (sample_rate_to_freq(si, rate) >
		    sysctl_perf_event_sample_rate) {
			debug_sprintf_event(sfdbg, 1, "%s: "
					    "Sampling rate exceeds maximum "
					    "perf sample rate\n", __func__);
			rate = 0;
		}
	}
	return rate;
}

/* The sampling information (si) contains information about the
 * min/max sampling intervals and the CPU speed.  So calculate the
 * correct sampling interval and avoid the whole period adjust
 * feedback loop.
 *
 * Since the CPU Measurement sampling facility cannot handle frequency,
 * calculate the sampling interval when frequency is specified using
 * this formula:
 *	interval := cpu_speed * 1000000 / sample_freq
 *
 * Returns errno on bad input and zero on success with parameter interval
 * set to the correct sampling rate.
 *
 * Note: This function turns off the freq bit to avoid calling function
 * perf_adjust_period().  This causes frequency adjustment in the common
 * code part which causes tremendous variations in the counter values.
 */
static int __hw_perf_event_init_rate(struct perf_event *event,
				     struct hws_qsi_info_block *si)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long rate;

	if (attr->freq) {
		if (!attr->sample_freq)
			return -EINVAL;
		rate = getrate(attr->freq, attr->sample_freq, si);
		attr->freq = 0;		/* Don't call perf_adjust_period() */
		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
	} else {
		rate = getrate(attr->freq, attr->sample_period, si);
		if (!rate)
			return -EINVAL;
	}
	attr->sample_period = rate;
	SAMPL_RATE(hwc) = rate;
	hw_init_period(hwc, SAMPL_RATE(hwc));
	debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n",
			    __func__, event->cpu, event->attr.sample_period,
			    event->attr.freq, SAMPLE_FREQ_MODE(hwc));
	return 0;
}
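
/*
 * Worked example of the interval formula quoted above, with an assumed
 * CPU speed: for cpu_speed == 5200 and sample_freq == 4000 Hz,
 * interval = 5200 * 1000000 / 4000 = 1300000.  The result is then
 * clamped to the QSI min/max range by hw_limit_rate() in the freq
 * branch of getrate().
 */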

static int __hw_perf_event_init(struct perf_event *event)
{
	struct cpu_hw_sf *cpuhw;
	struct hws_qsi_info_block si;
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int cpu, err;

	/* Reserve CPU-measurement sampling facility */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	if (err)
		goto out;

	/* Access per-CPU sampling information (query sampling info) */
	/*
	 * The event->cpu value can be -1 to count on every CPU, for example,
	 * when attaching to a task.  If this is specified, use the query
	 * sampling info from the current CPU, otherwise use event->cpu to
	 * retrieve the per-CPU information.
	 * Later, cpuhw indicates whether to allocate sampling buffers for a
	 * particular CPU (cpuhw != NULL) or each online CPU (cpuhw == NULL).
	 */
	memset(&si, 0, sizeof(si));
	cpuhw = NULL;
	if (event->cpu == -1)
		qsi(&si);
	else {
		/* Event is pinned to a particular CPU, retrieve the per-CPU
		 * sampling structure for accessing the CPU-specific QSI.
		 */
		cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
		si = cpuhw->qsi;
	}

	/* Check sampling facility authorization and, if not authorized,
	 * fall back to other PMUs.  It is safe to check any CPU because
	 * the authorization is identical for all configured CPUs.
	 */
	if (!si.as) {
		err = -ENOENT;
		goto out;
	}

	if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
		pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
		err = -EBUSY;
		goto out;
	}

	/* Always enable basic sampling */
	SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;

	/* Check if diagnostic sampling is requested.  Deny if the required
	 * sampling authorization is missing.
	 */
	if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
		if (!si.ad) {
			err = -EPERM;
			goto out;
		}
		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
	}

	/* Check and set other sampling flags */
	if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;

	err = __hw_perf_event_init_rate(event, &si);
	if (err)
		goto out;

	/* Initialize sample data overflow accounting */
	hwc->extra_reg.reg = REG_OVERFLOW;
	OVERFLOW_REG(hwc) = 0;

	/* Use AUX buffer.  No need to allocate it ourselves. */
	if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
		return 0;

	/* Allocate the per-CPU sampling buffer using the CPU information
	 * from the event.  If the event is not pinned to a particular
	 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
	 * buffers for each online CPU.
	 */
	if (cpuhw)
		/* Event is pinned to a particular CPU */
		err = allocate_buffers(cpuhw, hwc);
	else {
		/* Event is not pinned, allocate sampling buffer on
		 * each online CPU
		 */
		for_each_online_cpu(cpu) {
			cpuhw = &per_cpu(cpu_hw_sf, cpu);
			err = allocate_buffers(cpuhw, hwc);
			if (err)
				break;
		}
	}

	/* If PID/TID sampling is active, replace the default overflow
	 * handler to extract and resolve the PIDs from the basic-sampling
	 * data entries.
	 */
	if (event->attr.sample_type & PERF_SAMPLE_TID)
		if (is_default_overflow_handler(event))
			event->overflow_handler = cpumsf_output_event_pid;
out:
	return err;
}

static int cpumsf_pmu_event_init(struct perf_event *event)
{
	int err;

	/* No support for taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
		if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
		    (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
			return -ENOENT;
		break;
	case PERF_TYPE_HARDWARE:
		/* Support sampling of CPU cycles in addition to the
		 * counter facility.  However, the counter facility
		 * is more precise and, hence, restrict this PMU to
		 * sampling events only.
		 */
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
			return -ENOENT;
		if (!is_sampling_event(event))
			return -ENOENT;
		break;
	default:
		return -ENOENT;
	}

	/* Check online status of the CPU to which the event is pinned */
	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	/* Force reset of idle/hv excludes regardless of what the
	 * user requested.
	 */
	if (event->attr.exclude_hv)
		event->attr.exclude_hv = 0;
	if (event->attr.exclude_idle)
		event->attr.exclude_idle = 0;

	err = __hw_perf_event_init(event);
	if (unlikely(err))
		if (event->destroy)
			event->destroy(event);
	return err;
}
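
/*
 * Illustration (a sketch, not from the original source): a minimal
 * perf_event_attr that reaches this PMU through the PERF_TYPE_RAW
 * branch of cpumsf_pmu_event_init() above:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_RAW,
 *		.config		= PERF_EVENT_CPUM_SF,
 *		.sample_period	= 10000,
 *		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_TID,
 *	};
 *
 * The sample_period value is illustrative; it is clamped to the
 * machine's QSI limits by getrate() during initialization.
 */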

static void cpumsf_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
	struct hw_perf_event *hwc;
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	if (cpuhw->flags & PMU_F_ERR_MASK)
		return;

	/* Check whether to extend the sampling buffer.
	 *
	 * Two conditions trigger an increase of the sampling buffer for a
	 * perf event:
	 *    1. Postponed buffer allocations from the event initialization.
	 *    2. Sampling overflows that contribute to pending allocations.
	 *
	 * Note that the extend_sampling_buffer() function disables the sampling
	 * facility, but it can be fully re-enabled using sampling controls that
	 * have been saved in cpumsf_pmu_disable().
	 */
	if (cpuhw->event) {
		hwc = &cpuhw->event->hw;
		if (!(SAMPL_DIAG_MODE(hwc))) {
			/*
			 * Account number of overflow-designated
			 * buffer extents
			 */
			sfb_account_overflows(cpuhw, hwc);
			extend_sampling_buffer(&cpuhw->sfb, hwc);
		}
		/* Rate may be adjusted with ioctl() */
		cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
	}

	/* (Re)enable the PMU and sampling facility */
	cpuhw->flags |= PMU_F_ENABLED;
	barrier();

	err = lsctl(&cpuhw->lsctl);
	if (err) {
		cpuhw->flags &= ~PMU_F_ENABLED;
		pr_err("Loading sampling controls failed: op %i err %i\n",
		       1, err);
		return;
	}

	/* Load current program parameter */
	lpp(&S390_lowcore.lpp);

	debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i "
			    "interval %#lx tear %#lx dear %#lx\n", __func__,
			    cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
			    cpuhw->lsctl.cd, cpuhw->lsctl.interval,
			    cpuhw->lsctl.tear, cpuhw->lsctl.dear);
}

static void cpumsf_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
	struct hws_lsctl_request_block inactive;
	struct hws_qsi_info_block si;
	int err;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	if (cpuhw->flags & PMU_F_ERR_MASK)
		return;

	/* Switch off sampling activation control */
	inactive = cpuhw->lsctl;
	inactive.cs = 0;
	inactive.cd = 0;

	err = lsctl(&inactive);
	if (err) {
		pr_err("Loading sampling controls failed: op %i err %i\n",
		       2, err);
		return;
	}

	/* Save state of TEAR and DEAR register contents */
	err = qsi(&si);
	if (!err) {
		/* TEAR/DEAR values are valid only if the sampling facility is
		 * enabled.  Note that cpumsf_pmu_disable() might be called even
		 * for a disabled sampling facility because cpumsf_pmu_enable()
		 * controls the enable/disable state.
		 */
		if (si.es) {
			cpuhw->lsctl.tear = si.tear;
			cpuhw->lsctl.dear = si.dear;
		}
	} else
		debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n",
				    __func__, err);

	cpuhw->flags &= ~PMU_F_ENABLED;
}
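
/*
 * Note on the enable/disable pairing (an interpretation of the code
 * above): cpumsf_pmu_disable() clears only the activation controls
 * (cs/cd) and saves the current TEAR/DEAR buffer positions, so that a
 * subsequent cpumsf_pmu_enable() reloads the very same controls with
 * lsctl() and sampling resumes at the saved buffer position.
 */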

/* perf_exclude_event() - Filter event
 * @event:	The perf event
 * @regs:	pt_regs structure
 * @sde_regs:	Sample-data-entry (sde) regs structure
 *
 * Filter perf events according to their exclude specification.
 *
 * Return non-zero if the event shall be excluded.
 */
static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
			      struct perf_sf_sde_regs *sde_regs)
{
	if (event->attr.exclude_user && user_mode(regs))
		return 1;
	if (event->attr.exclude_kernel && !user_mode(regs))
		return 1;
	if (event->attr.exclude_guest && sde_regs->in_guest)
		return 1;
	if (event->attr.exclude_host && !sde_regs->in_guest)
		return 1;
	return 0;
}

/* perf_push_sample() - Push samples to perf
 * @event:	The perf event
 * @sample:	Hardware sample data
 *
 * Use the hardware sample data to create a perf event sample.  The sample
 * is then pushed to the event subsystem and the function checks for
 * possible event overflows.  If an event overflow occurs, the PMU is
 * stopped.
 *
 * Return non-zero if an event overflow occurred.
 */
static int perf_push_sample(struct perf_event *event,
			    struct hws_basic_entry *basic)
{
	int overflow;
	struct pt_regs regs;
	struct perf_sf_sde_regs *sde_regs;
	struct perf_sample_data data;

	/* Setup perf sample */
	perf_sample_data_init(&data, 0, event->hw.last_period);

	/* Setup pt_regs to look like a CPU-measurement external interrupt
	 * using the Program Request Alert code.  The regs.int_parm_long
	 * field, which is otherwise unused, contains additional
	 * sample-data-entry related indicators.
	 */
	memset(&regs, 0, sizeof(regs));
	regs.int_code = 0x1407;
	regs.int_parm = CPU_MF_INT_SF_PRA;
	sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;

	psw_bits(regs.psw).ia	  = basic->ia;
	psw_bits(regs.psw).dat	  = basic->T;
	psw_bits(regs.psw).wait	  = basic->W;
	psw_bits(regs.psw).pstate = basic->P;
	psw_bits(regs.psw).as	  = basic->AS;

	/*
	 * Use the hardware provided configuration level to decide if the
	 * sample belongs to a guest or host.  If that is not available,
	 * fall back to the following heuristics:
	 * A non-zero guest program parameter always indicates a guest
	 * sample.  Some early samples or samples from guests without
	 * lpp usage would be misaccounted to the host.  We use the asn
	 * value as an add-on heuristic to detect most of these guest samples.
	 * If the value differs from 0xffff (the host value), we assume the
	 * sample comes from a KVM guest.
	 */
	switch (basic->CL) {
	case 1: /* logical partition */
		sde_regs->in_guest = 0;
		break;
	case 2: /* virtual machine */
		sde_regs->in_guest = 1;
		break;
	default: /* old machine, use heuristics */
		if (basic->gpp || basic->prim_asn != 0xffff)
			sde_regs->in_guest = 1;
		break;
	}

	/*
	 * Store the PID value from the sample-data-entry to be
	 * processed and resolved by cpumsf_output_event_pid().
	 */
	data.tid_entry.pid = basic->hpp & LPP_PID_MASK;

	overflow = 0;
	if (perf_exclude_event(event, &regs, sde_regs))
		goto out;
	if (perf_event_overflow(event, &data, &regs)) {
		overflow = 1;
		event->pmu->stop(event, 0);
	}
	perf_event_update_userpage(event);
out:
	return overflow;
}

static void perf_event_count_update(struct perf_event *event, u64 count)
{
	local64_add(count, &event->count);
}
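
/*
 * Note (an interpretation of the code): hw_collect_samples() below
 * credits SAMPL_RATE(&event->hw) to the event count for every harvested
 * sample, so for a cycles sampling event with period P, event->count
 * advances by roughly P per sample rather than by the number of samples.
 */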

/* hw_collect_samples() - Walk through a sample-data-block and collect samples
 * @event:	The perf event
 * @sdbt:	Sample-data-block table
 * @overflow:	Event overflow counter
 *
 * Walks through a sample-data-block and collects sampling data entries that are
 * then pushed to the perf event subsystem.  Depending on the sampling function,
 * there can be either basic-sampling or combined-sampling data entries.  A
 * combined-sampling data entry consists of a basic- and a diagnostic-sampling
 * data entry.  The sampling function is determined by the flags in the perf
 * event hardware structure.  The function always works with a combined-sampling
 * data entry but ignores the diagnostic portion if it is not available.
 *
 * Note that the implementation focuses on basic-sampling data entries and, if
 * such an entry is not valid, the entire combined-sampling data entry is
 * ignored.
 *
 * The overflow variable counts the number of samples that have been discarded
 * due to a perf event overflow.
 */
static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
			       unsigned long long *overflow)
{
	struct hws_trailer_entry *te;
	struct hws_basic_entry *sample;

	te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
	sample = (struct hws_basic_entry *) *sdbt;
	while ((unsigned long *) sample < (unsigned long *) te) {
		/* Check for an empty sample */
		if (!sample->def)
			break;

		/* Update perf event period */
		perf_event_count_update(event, SAMPL_RATE(&event->hw));

		/* Check whether sample is valid */
		if (sample->def == 0x0001) {
			/* If an event overflow occurred, the PMU is stopped to
			 * throttle event delivery.  Remaining sample data is
			 * discarded.
			 */
			if (!*overflow) {
				/* Check whether sample is consistent */
				if (sample->I == 0 && sample->W == 0) {
					/* Deliver sample data to perf */
					*overflow = perf_push_sample(event,
								     sample);
				}
			} else
				/* Count discarded samples */
				*overflow += 1;
		} else {
			debug_sprintf_event(sfdbg, 4,
					    "%s: Found unknown"
					    " sampling data entry: te->f %i"
					    " basic.def %#4x (%p)\n", __func__,
					    te->f, sample->def, sample);
			/* Sample slot is not yet written or other record.
			 *
			 * This condition can occur if the buffer was reused
			 * from a combined basic- and diagnostic-sampling.
			 * If only basic-sampling is then active, entries are
			 * written into the larger diagnostic entries.
			 * This is typically the case for sample-data-blocks
			 * that are not full.  Stop processing if the first
			 * invalid format was detected.
			 */
			if (!te->f)
				break;
		}

		/* Reset sample slot and advance to next sample */
		sample->def = 0;
		sample++;
	}
}
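
/*
 * Sketch of the per-slot protocol implemented above (derived from the
 * code): a slot with sample->def == 0x0001 holds a valid basic entry;
 * sample->def is cleared once the entry has been pushed so the slot can
 * be refilled; any other non-zero def together with te->f == 0 marks
 * the unwritten remainder of a partially filled block.
 */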

/* hw_perf_event_update() - Process sampling buffer
 * @event:	The perf event
 * @flush_all:	Flag to also flush partially filled sample-data-blocks
 *
 * Processes the sampling buffer and creates perf event samples.
 * The sampling buffer position is retrieved and saved in the TEAR_REG
 * register of the specified perf event.
 *
 * Only full sample-data-blocks are processed.  Specify the flush_all flag
 * to also walk through partially filled sample-data-blocks.  It is ignored
 * if PERF_CPUM_SF_FULL_BLOCKS is set.  The PERF_CPUM_SF_FULL_BLOCKS flag
 * enforces the processing of full sample-data-blocks only (trailer entries
 * with the block-full-indicator bit set).
 */
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hws_trailer_entry *te;
	unsigned long *sdbt;
	unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
	int done;

	/*
	 * AUX buffer is used when in diagnostic sampling mode.
	 * No perf events/samples are created.
	 */
	if (SAMPL_DIAG_MODE(&event->hw))
		return;

	if (flush_all && SDB_FULL_BLOCKS(hwc))
		flush_all = 0;

	sdbt = (unsigned long *) TEAR_REG(hwc);
	done = event_overflow = sampl_overflow = num_sdb = 0;
	while (!done) {
		/* Get the trailer entry of the sample-data-block */
		te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);

		/* Leave loop if no more work to do (block full indicator) */
		if (!te->f) {
			done = 1;
			if (!flush_all)
				break;
		}

		/* Check the sample overflow count */
		if (te->overflow)
			/* Account sample overflows and, if a particular limit
			 * is reached, extend the sampling buffer.
			 * For details, see sfb_account_overflows().
			 */
			sampl_overflow += te->overflow;

		/* Timestamps are valid for full sample-data-blocks only */
		debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
				    "overflow %llu timestamp %#llx\n",
				    __func__, (unsigned long)sdbt, te->overflow,
				    (te->f) ? trailer_timestamp(te) : 0ULL);

		/* Collect all samples from a single sample-data-block and
		 * flag if a (perf) event overflow happened.  If so, the PMU
		 * is stopped and remaining samples will be discarded.
		 */
		hw_collect_samples(event, sdbt, &event_overflow);
		num_sdb++;

		/* Reset trailer (using compare-double-and-swap) */
		do {
			te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
			te_flags |= SDB_TE_ALERT_REQ_MASK;
		} while (!cmpxchg_double(&te->flags, &te->overflow,
					 te->flags, te->overflow,
					 te_flags, 0ULL));

		/* Advance to next sample-data-block */
		sdbt++;
		if (is_link_entry(sdbt))
			sdbt = get_next_sdbt(sdbt);

		/* Update event hardware registers */
		TEAR_REG(hwc) = (unsigned long) sdbt;

		/* Stop processing sample-data if all samples of the current
		 * sample-data-block were flushed even if it was not full.
		 */
		if (flush_all && done)
			break;
	}

	/* Account sample overflows in the event hardware structure */
	if (sampl_overflow)
		OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
						 sampl_overflow, 1 + num_sdb);

	/* Perf_event_overflow() and perf_event_account_interrupt() limit
	 * the interrupt rate to an upper limit.  Roughly 1000 samples per
	 * task tick.
	 * Hitting this limit results in a large number
	 * of throttled REF_REPORT_THROTTLE entries and the samples
	 * are dropped.
	 * Slightly increase the interval to avoid hitting this limit.
	 */
	if (event_overflow) {
		SAMPL_RATE(hwc) += DIV_ROUND_UP(SAMPL_RATE(hwc), 10);
		debug_sprintf_event(sfdbg, 1, "%s: rate adjustment %ld\n",
				    __func__,
				    DIV_ROUND_UP(SAMPL_RATE(hwc), 10));
	}

	if (sampl_overflow || event_overflow)
		debug_sprintf_event(sfdbg, 4, "%s: "
				    "overflows: sample %llu event %llu"
				    " total %llu num_sdb %llu\n",
				    __func__, sampl_overflow, event_overflow,
				    OVERFLOW_REG(hwc), num_sdb);
}

#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
#define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0)
#define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark)
#define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark)
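
/*
 * Worked example for the macros above (illustrative numbers): with
 * aux->sfb.num_sdb == 32, aux->head == 30 and aux->alert_mark == 33,
 * AUX_SDB_NUM_ALERT(aux) == 33 - 30 + 1 == 4 SDBs, and the logical
 * index 33 wraps to physical slot AUX_SDB_INDEX(aux, 33) == 33 % 32
 * == 1 of the sdb_index[] lookup table.
 */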

/*
 * Get trailer entry by index of SDB.
 */
static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
						 unsigned long index)
{
	unsigned long sdb;

	index = AUX_SDB_INDEX(aux, index);
	sdb = aux->sdb_index[index];
	return (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
}

/*
 * Finish sampling on the CPU.  Called by cpumsf_pmu_del() with pmu
 * disabled.  Collect the full SDBs in the AUX buffer that have not yet
 * reached the alert indicator, and ignore the SDBs that are not full.
 *
 * 1. Scan SDBs to see how much data is there and consume them.
 * 2. Remove alert indicator in the buffer.
 */
static void aux_output_end(struct perf_output_handle *handle)
{
	unsigned long i, range_scan, idx;
	struct aux_buffer *aux;
	struct hws_trailer_entry *te;

	aux = perf_get_aux(handle);
	if (!aux)
		return;

	range_scan = AUX_SDB_NUM_ALERT(aux);
	for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
		te = aux_sdb_trailer(aux, idx);
		if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
			break;
	}
	/* i is num of SDBs which are full */
	perf_aux_output_end(handle, i << PAGE_SHIFT);

	/* Remove alert indicators in the buffer */
	te = aux_sdb_trailer(aux, aux->alert_mark);
	te->flags &= ~SDB_TE_ALERT_REQ_MASK;

	debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
			    __func__, i, range_scan, aux->head);
}

/*
 * Start sampling on the CPU.  Called by cpumsf_pmu_add() when an event
 * is first added to the CPU or rescheduled again to the CPU.  It is called
 * with pmu disabled.
 *
 * 1. Reset the trailer of SDBs to get ready for new data.
 * 2. Tell the hardware where to put the data by resetting the SDB buffer
 *    head (tear/dear).
 */
static int aux_output_begin(struct perf_output_handle *handle,
			    struct aux_buffer *aux,
			    struct cpu_hw_sf *cpuhw)
{
	unsigned long range;
	unsigned long i, range_scan, idx;
	unsigned long head, base, offset;
	struct hws_trailer_entry *te;

	if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
		return -EINVAL;

	aux->head = handle->head >> PAGE_SHIFT;
	range = (handle->size + 1) >> PAGE_SHIFT;
	if (range <= 1)
		return -ENOMEM;

	/*
	 * SDBs between aux->head and aux->empty_mark are already ready
	 * for new data.  range_scan is the number of SDBs not within them.
	 */
	debug_sprintf_event(sfdbg, 6,
			    "%s: range %ld head %ld alert %ld empty %ld\n",
			    __func__, range, aux->head, aux->alert_mark,
			    aux->empty_mark);
	if (range > AUX_SDB_NUM_EMPTY(aux)) {
		range_scan = range - AUX_SDB_NUM_EMPTY(aux);
		idx = aux->empty_mark + 1;
		for (i = 0; i < range_scan; i++, idx++) {
			te = aux_sdb_trailer(aux, idx);
			te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
				       SDB_TE_ALERT_REQ_MASK);
			te->overflow = 0;
		}
		/* Save the position of empty SDBs */
		aux->empty_mark = aux->head + range - 1;
	}

	/* Set alert indicator */
	aux->alert_mark = aux->head + range/2 - 1;
	te = aux_sdb_trailer(aux, aux->alert_mark);
	te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;

	/* Reset hardware buffer head */
	head = AUX_SDB_INDEX(aux, aux->head);
	base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
	offset = head % CPUM_SF_SDB_PER_TABLE;
	cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
	cpuhw->lsctl.dear = aux->sdb_index[head];

	debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld empty %ld "
			    "index %ld tear %#lx dear %#lx\n", __func__,
			    aux->head, aux->alert_mark, aux->empty_mark,
			    head / CPUM_SF_SDB_PER_TABLE,
			    cpuhw->lsctl.tear, cpuhw->lsctl.dear);

	return 0;
}

/*
 * Set alert indicator on SDB at index @alert_index while sampler is running.
 *
 * Return true on success.
 * Return false if the full indicator is already set by the hardware sampler.
 */
static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
			  unsigned long long *overflow)
{
	unsigned long long orig_overflow, orig_flags, new_flags;
	struct hws_trailer_entry *te;

	te = aux_sdb_trailer(aux, alert_index);
	do {
		orig_flags = te->flags;
		*overflow = orig_overflow = te->overflow;
		if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
			/*
			 * SDB is already set by hardware.
			 * Abort and try to set somewhere
			 * behind.
			 */
			return false;
		}
		new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
	} while (!cmpxchg_double(&te->flags, &te->overflow,
				 orig_flags, orig_overflow,
				 new_flags, 0ULL));
	return true;
}
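
/*
 * Illustration of the three marks on the SDB ring (a sketch assuming
 * 8 SDBs, head == 0 and range == 8): aux_output_begin() above sets
 * alert_mark = 0 + 8/2 - 1 = 3 and empty_mark = 0 + 8 - 1 = 7, so the
 * measurement alert fires halfway through the window and full SDBs can
 * be handed to user space before the ring wraps.
 */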

/*
 * aux_reset_buffer() - Scan and setup SDBs for new samples
 * @aux:	The AUX buffer to set
 * @range:	The range of SDBs to scan, starting from aux->head
 * @overflow:	Set to overflow count
 *
 * Set the alert indicator on the SDB at index aux->alert_mark.  If this SDB is
 * marked as empty, check if it is already set full by the hardware sampler.
 * If yes, that means new data is already there before we can set an alert
 * indicator.  The caller should then try to set the alert indicator to some
 * position behind.
 *
 * Scan the SDBs in the AUX buffer from behind aux->empty_mark.  They were used
 * previously and have already been consumed by user space.  Reset these SDBs
 * (clear full indicator and alert indicator) for new data.
 * If aux->alert_mark falls into this area, just set it.  The overflow count is
 * recorded while scanning.
 *
 * SDBs between aux->head and aux->empty_mark were already reset last time and
 * are ready for new samples.  So scanning this area can be skipped.
 *
 * Return true if the alert indicator is set successfully and false if not.
 */
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
			     unsigned long long *overflow)
{
	unsigned long long orig_overflow, orig_flags, new_flags;
	unsigned long i, range_scan, idx, idx_old;
	struct hws_trailer_entry *te;

	debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
			    "empty %ld\n", __func__, range, aux->head,
			    aux->alert_mark, aux->empty_mark);
	if (range <= AUX_SDB_NUM_EMPTY(aux))
		/*
		 * No need to scan.  All SDBs in range are marked as empty.
		 * Just set alert indicator.  Should check race with hardware
		 * sampler.
		 */
		return aux_set_alert(aux, aux->alert_mark, overflow);

	if (aux->alert_mark <= aux->empty_mark)
		/*
		 * Set alert indicator on empty SDB.  Should check race
		 * with hardware sampler.
		 */
		if (!aux_set_alert(aux, aux->alert_mark, overflow))
			return false;

	/*
	 * Scan the SDBs to clear full and alert indicators used previously.
	 * Start scanning from one SDB behind empty_mark.  If the new alert
	 * indicator falls into this range, set it.
	 */
	range_scan = range - AUX_SDB_NUM_EMPTY(aux);
	idx_old = idx = aux->empty_mark + 1;
	for (i = 0; i < range_scan; i++, idx++) {
		te = aux_sdb_trailer(aux, idx);
		do {
			orig_flags = te->flags;
			orig_overflow = te->overflow;
			new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
			if (idx == aux->alert_mark)
				new_flags |= SDB_TE_ALERT_REQ_MASK;
			else
				new_flags &= ~SDB_TE_ALERT_REQ_MASK;
		} while (!cmpxchg_double(&te->flags, &te->overflow,
					 orig_flags, orig_overflow,
					 new_flags, 0ULL));
		*overflow += orig_overflow;
	}

	/* Update empty_mark to new position */
	aux->empty_mark = aux->head + range - 1;

	debug_sprintf_event(sfdbg, 6, "%s: range_scan %ld idx %ld..%ld "
			    "empty %ld\n", __func__, range_scan, idx_old,
			    idx - 1, aux->empty_mark);
	return true;
}
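
/*
 * Note on the cmpxchg_double() loops above (an interpretation): the
 * flags and overflow fields of a trailer entry are updated as one
 * 16-byte unit so that an overflow count written concurrently by the
 * hardware sampler is either observed (and accumulated into *overflow)
 * or the update is retried, but it is never silently lost.
 */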

/*
 * Measurement alert handler for diagnostic mode sampling.
 */
static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
{
	struct aux_buffer *aux;
	int done = 0;
	unsigned long range = 0, size;
	unsigned long long overflow = 0;
	struct perf_output_handle *handle = &cpuhw->handle;
	unsigned long num_sdb;

	aux = perf_get_aux(handle);
	if (WARN_ON_ONCE(!aux))
		return;

	/* Inform user space that new data has arrived */
	size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
	debug_sprintf_event(sfdbg, 6, "%s: #alert %ld\n", __func__,
			    size >> PAGE_SHIFT);
	perf_aux_output_end(handle, size);

	num_sdb = aux->sfb.num_sdb;
	while (!done) {
		/* Get an output handle */
		aux = perf_aux_output_begin(handle, cpuhw->event);
		if (handle->size == 0) {
			pr_err("The AUX buffer with %lu pages for the "
			       "diagnostic-sampling mode is full\n",
			       num_sdb);
			debug_sprintf_event(sfdbg, 1,
					    "%s: AUX buffer used up\n",
					    __func__);
			break;
		}
		if (WARN_ON_ONCE(!aux))
			return;

		/* Update head and alert_mark to new position */
		aux->head = handle->head >> PAGE_SHIFT;
		range = (handle->size + 1) >> PAGE_SHIFT;
		if (range == 1)
			aux->alert_mark = aux->head;
		else
			aux->alert_mark = aux->head + range/2 - 1;

		if (aux_reset_buffer(aux, range, &overflow)) {
			if (!overflow) {
				done = 1;
				break;
			}
			size = range << PAGE_SHIFT;
			perf_aux_output_end(&cpuhw->handle, size);
			pr_err("Sample data caused the AUX buffer with %lu "
			       "pages to overflow\n", aux->sfb.num_sdb);
			debug_sprintf_event(sfdbg, 1, "%s: head %ld range %ld "
					    "overflow %lld\n", __func__,
					    aux->head, range, overflow);
		} else {
			size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
			perf_aux_output_end(&cpuhw->handle, size);
			debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
					    "already full, try another\n",
					    __func__,
					    aux->head, aux->alert_mark);
		}
	}

	if (done)
		debug_sprintf_event(sfdbg, 6, "%s: head %ld alert %ld "
				    "empty %ld\n", __func__, aux->head,
				    aux->alert_mark, aux->empty_mark);
}

/*
 * Callback when freeing AUX buffers.
 */
static void aux_buffer_free(void *data)
{
	struct aux_buffer *aux = data;
	unsigned long i, num_sdbt;

	if (!aux)
		return;

	/* Free SDBT.  SDB is freed by the caller */
	num_sdbt = aux->sfb.num_sdbt;
	for (i = 0; i < num_sdbt; i++)
		free_page(aux->sdbt_index[i]);

	kfree(aux->sdbt_index);
	kfree(aux->sdb_index);
	kfree(aux);

	debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu\n", __func__, num_sdbt);
}

static void aux_sdb_init(unsigned long sdb)
{
	struct hws_trailer_entry *te;

	te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);

	/* Save clock base */
	te->clock_base = 1;
	memcpy(&te->progusage2, &tod_clock_base[1], 8);
}
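
/*
 * Note on aux_sdb_init() (an interpretation): storing the TOD clock
 * base in progusage2 and setting clock_base tags each SDB so that
 * consumers of the AUX data can relate the TOD timestamps in the
 * trailer entries to a known clock base.
 */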

/*
 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
 * @event:	Event the buffer is setup for, event->cpu == -1 means current
 * @pages:	Array of pointers to buffer pages passed from perf core
 * @nr_pages:	Total pages
 * @snapshot:	Flag for snapshot mode
 *
 * This is the callback when setting up an event that uses an AUX buffer.
 * The perf tool can trigger it by an additional mmap() call on the event.
 * Unlike the buffer for basic samples, the AUX buffer belongs to the event.
 * It is scheduled with the task among online CPUs when it is a per-thread
 * event.
 *
 * Return the private AUX buffer structure on success or NULL on failure.
 */
static void *aux_buffer_setup(struct perf_event *event, void **pages,
			      int nr_pages, bool snapshot)
{
	struct sf_buffer *sfb;
	struct aux_buffer *aux;
	unsigned long *new, *tail;
	int i, n_sdbt;

	if (!nr_pages || !pages)
		return NULL;

	if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
		pr_err("AUX buffer size (%i pages) is larger than the "
		       "maximum sampling buffer limit\n",
		       nr_pages);
		return NULL;
	} else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
		pr_err("AUX buffer size (%i pages) is less than the "
		       "minimum sampling buffer limit\n",
		       nr_pages);
		return NULL;
	}

	/* Allocate aux_buffer struct for the event */
	aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
	if (!aux)
		goto no_aux;
	sfb = &aux->sfb;

	/* Allocate sdbt_index for fast reference */
	n_sdbt = DIV_ROUND_UP(nr_pages, CPUM_SF_SDB_PER_TABLE);
	aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
	if (!aux->sdbt_index)
		goto no_sdbt_index;

	/* Allocate sdb_index for fast reference */
	aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!aux->sdb_index)
		goto no_sdb_index;

	/* Allocate the first SDBT */
	sfb->num_sdbt = 0;
	sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!sfb->sdbt)
		goto no_sdbt;
	aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
	tail = sfb->tail = sfb->sdbt;

	/*
	 * Link the provided pages of the AUX buffer to the SDBT.
	 * Allocate SDBTs if needed.
	 */
	for (i = 0; i < nr_pages; i++, tail++) {
		if (require_table_link(tail)) {
			new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
			if (!new)
				goto no_sdbt;
			aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
			/* Link current page to tail of chain */
			*tail = (unsigned long)(void *) new + 1;
			tail = new;
		}
		/* Tail is the entry in a SDBT */
		*tail = (unsigned long)pages[i];
		aux->sdb_index[i] = (unsigned long)pages[i];
		aux_sdb_init((unsigned long)pages[i]);
	}
	sfb->num_sdb = nr_pages;

	/* Link the last entry in the SDBT to the first SDBT */
	*tail = (unsigned long) sfb->sdbt + 1;
	sfb->tail = tail;

	/*
	 * Initially, all SDBs are zeroed.  Mark them as empty, so there
	 * is no need to clear the full indicator when this event is
	 * first added.
	 */
	aux->empty_mark = sfb->num_sdb - 1;

	debug_sprintf_event(sfdbg, 4, "%s: SDBTs %lu SDBs %lu\n", __func__,
			    sfb->num_sdbt, sfb->num_sdb);

	return aux;

no_sdbt:
	/* SDBs (AUX buffer pages) are freed by caller */
	for (i = 0; i < sfb->num_sdbt; i++)
		free_page(aux->sdbt_index[i]);
	kfree(aux->sdb_index);
no_sdb_index:
	kfree(aux->sdbt_index);
no_sdbt_index:
	kfree(aux);
no_aux:
	return NULL;
}

static void cpumsf_pmu_read(struct perf_event *event)
{
	/* Nothing to do ... updates are interrupt-driven */
}
/* Check if the new sampling period/frequency is appropriate.
 *
 * Return non-zero on error and zero on passed checks.
 */
static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
{
        struct hws_qsi_info_block si;
        unsigned long rate;
        bool do_freq;

        memset(&si, 0, sizeof(si));
        if (event->cpu == -1) {
                if (qsi(&si))
                        return -ENODEV;
        } else {
                /* Event is pinned to a particular CPU, retrieve the per-CPU
                 * sampling structure for accessing the CPU-specific QSI.
                 */
                struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);

                si = cpuhw->qsi;
        }

        do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
        rate = getrate(do_freq, value, &si);
        if (!rate)
                return -EINVAL;

        event->attr.sample_period = rate;
        SAMPL_RATE(&event->hw) = rate;
        hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
        debug_sprintf_event(sfdbg, 4, "%s:"
                            " cpu %d value %#llx period %#llx freq %d\n",
                            __func__, event->cpu, value,
                            event->attr.sample_period, do_freq);
        return 0;
}

/* Activate sampling control.
 * Next call of pmu_enable() starts sampling.
 */
static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

        perf_pmu_disable(event->pmu);
        event->hw.state = 0;
        cpuhw->lsctl.cs = 1;
        if (SAMPL_DIAG_MODE(&event->hw))
                cpuhw->lsctl.cd = 1;
        perf_pmu_enable(event->pmu);
}

/* Deactivate sampling control.
 * Next call of pmu_enable() stops sampling.
 */
static void cpumsf_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        perf_pmu_disable(event->pmu);
        cpuhw->lsctl.cs = 0;
        cpuhw->lsctl.cd = 0;
        event->hw.state |= PERF_HES_STOPPED;

        if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
                hw_perf_event_update(event, 1);
                event->hw.state |= PERF_HES_UPTODATE;
        }
        perf_pmu_enable(event->pmu);
}
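/*
 * Scheduling note (descriptive sketch of the generic perf PMU contract,
 * not taken from this file): the perf core invokes the callbacks below
 * roughly in this order when an event is scheduled in and out:
 *
 *	cpumsf_pmu_add(event, PERF_EF_START);	(schedule in and start)
 *	cpumsf_pmu_stop(event, PERF_EF_UPDATE);	(halt, flush sample data)
 *	cpumsf_pmu_start(event, PERF_EF_RELOAD);	(resume sampling)
 *	cpumsf_pmu_del(event, 0);		(schedule out)
 *
 * Hence cpumsf_pmu_add() must leave the event in the stopped state
 * unless PERF_EF_START is passed.
 */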
static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
        struct aux_buffer *aux;
        int err;

        if (cpuhw->flags & PMU_F_IN_USE)
                return -EAGAIN;

        if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt)
                return -EINVAL;

        err = 0;
        perf_pmu_disable(event->pmu);

        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        /* Set up sampling controls. Always program the sampling register
         * using the SDB-table start. Reset TEAR_REG event hardware register
         * that is used by hw_perf_event_update() to store the sampling buffer
         * position after samples have been flushed.
         */
        cpuhw->lsctl.s = 0;
        cpuhw->lsctl.h = 1;
        cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
        if (!SAMPL_DIAG_MODE(&event->hw)) {
                cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
                cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
                TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt;
        }

        /* Ensure sampling functions are in the disabled state. If disabled,
         * switch on sampling enable control. */
        if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
                err = -EAGAIN;
                goto out;
        }
        if (SAMPL_DIAG_MODE(&event->hw)) {
                aux = perf_aux_output_begin(&cpuhw->handle, event);
                if (!aux) {
                        err = -EINVAL;
                        goto out;
                }
                err = aux_output_begin(&cpuhw->handle, aux, cpuhw);
                if (err)
                        goto out;
                cpuhw->lsctl.ed = 1;
        }
        cpuhw->lsctl.es = 1;

        /* Set in_use flag and store event */
        cpuhw->event = event;
        cpuhw->flags |= PMU_F_IN_USE;

        if (flags & PERF_EF_START)
                cpumsf_pmu_start(event, PERF_EF_RELOAD);
out:
        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
        return err;
}

static void cpumsf_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);

        perf_pmu_disable(event->pmu);
        cpumsf_pmu_stop(event, PERF_EF_UPDATE);

        cpuhw->lsctl.es = 0;
        cpuhw->lsctl.ed = 0;
        cpuhw->flags &= ~PMU_F_IN_USE;
        cpuhw->event = NULL;

        if (SAMPL_DIAG_MODE(&event->hw))
                aux_output_end(&cpuhw->handle);
        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
}

CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
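/*
 * Example (illustrative; the paths assume the standard perf sysfs
 * layout): once installed in the attribute group below, the events are
 * visible under
 *
 *	/sys/bus/event_source/devices/cpum_sf/events/
 *
 * and can be requested by name, e.g.
 *
 *	perf record -e cpum_sf/SF_CYCLES_BASIC/ -a -- sleep 1
 */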
/* Attribute list for CPU_SF.
 *
 * The availability depends on the CPU_MF sampling facility authorization
 * for basic + diagnostic samples. This is determined at initialization
 * time by the sampling facility device driver.
 * If the authorization for basic samples is turned off, it should be
 * also turned off for diagnostic sampling.
 *
 * During initialization of the device driver, check the authorization
 * level for diagnostic sampling and install the attribute
 * file for diagnostic sampling if necessary.
 *
 * For now install a placeholder to reference all possible attributes:
 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
 * Add another entry for the final NULL pointer.
 */
enum {
        SF_CYCLES_BASIC_ATTR_IDX = 0,
        SF_CYCLES_BASIC_DIAG_ATTR_IDX,
        SF_CYCLES_ATTR_MAX
};

static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
        [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *cpumsf_pmu_format_attr[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group cpumsf_pmu_events_group = {
        .name = "events",
        .attrs = cpumsf_pmu_events_attr,
};

static struct attribute_group cpumsf_pmu_format_group = {
        .name = "format",
        .attrs = cpumsf_pmu_format_attr,
};

static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
        &cpumsf_pmu_events_group,
        &cpumsf_pmu_format_group,
        NULL,
};

static struct pmu cpumf_sampling = {
        .pmu_enable   = cpumsf_pmu_enable,
        .pmu_disable  = cpumsf_pmu_disable,

        .event_init   = cpumsf_pmu_event_init,
        .add          = cpumsf_pmu_add,
        .del          = cpumsf_pmu_del,

        .start        = cpumsf_pmu_start,
        .stop         = cpumsf_pmu_stop,
        .read         = cpumsf_pmu_read,

        .attr_groups  = cpumsf_pmu_attr_groups,

        .setup_aux    = aux_buffer_setup,
        .free_aux     = aux_buffer_free,

        .check_period = cpumsf_pmu_check_period,
};

static void cpumf_measurement_alert(struct ext_code ext_code,
                                    unsigned int alert, unsigned long unused)
{
        struct cpu_hw_sf *cpuhw;

        if (!(alert & CPU_MF_INT_SF_MASK))
                return;
        inc_irq_stat(IRQEXT_CMS);
        cpuhw = this_cpu_ptr(&cpu_hw_sf);

        /* Measurement alerts are shared and might happen when the PMU
         * is not reserved. Ignore these alerts in this case. */
        if (!(cpuhw->flags & PMU_F_RESERVED))
                return;

        /* The processing below must take care of multiple alert events that
         * might be indicated concurrently. */

        /* Program alert request */
        if (alert & CPU_MF_INT_SF_PRA) {
                if (cpuhw->flags & PMU_F_IN_USE) {
                        if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
                                hw_collect_aux(cpuhw);
                        else
                                hw_perf_event_update(cpuhw->event, 0);
                } else {
                        WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
                }
        }

        /* Report measurement alerts only for non-PRA codes */
        if (alert != CPU_MF_INT_SF_PRA)
                debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__,
                                    alert);

        /* Sampling authorization change request */
        if (alert & CPU_MF_INT_SF_SACA)
                qsi(&cpuhw->qsi);

        /* Loss of sample data due to high-priority machine activities */
        if (alert & CPU_MF_INT_SF_LSDA) {
                pr_err("Sample data was lost\n");
                cpuhw->flags |= PMU_F_ERR_LSDA;
                sf_disable();
        }

        /* Invalid sampling buffer entry */
        if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
                pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
                       alert);
                cpuhw->flags |= PMU_F_ERR_IBE;
                sf_disable();
        }
}
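/*
 * Wiring note (added for orientation): cpumf_measurement_alert() above
 * runs in external-interrupt context.  init_cpum_sampling_pmu() at the
 * end of this file attaches it to the measurement-alert interrupt via
 *
 *	register_external_irq(EXT_IRQ_MEASURE_ALERT,
 *			      cpumf_measurement_alert);
 */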
static int cpusf_pmu_setup(unsigned int cpu, int flags)
{
        /* Ignore the notification if no events are scheduled on the PMU.
         * This might be racy...
         */
        if (!atomic_read(&num_events))
                return 0;

        local_irq_disable();
        setup_pmc_cpu(&flags);
        local_irq_enable();
        return 0;
}

static int s390_pmu_sf_online_cpu(unsigned int cpu)
{
        return cpusf_pmu_setup(cpu, PMC_INIT);
}

static int s390_pmu_sf_offline_cpu(unsigned int cpu)
{
        return cpusf_pmu_setup(cpu, PMC_RELEASE);
}

static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
{
        if (!cpum_sf_avail())
                return -ENODEV;
        return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
}

static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
{
        int rc;
        unsigned long min, max;

        if (!cpum_sf_avail())
                return -ENODEV;
        if (!val || !strlen(val))
                return -EINVAL;

        /* Valid parameter values: "min,max" or "max" */
        min = CPUM_SF_MIN_SDB;
        max = CPUM_SF_MAX_SDB;
        if (strchr(val, ','))
                rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
        else
                rc = kstrtoul(val, 10, &max);

        if (min < 2 || min >= max || max > get_num_physpages())
                rc = -EINVAL;
        if (rc)
                return rc;

        sfb_set_limits(min, max);
        pr_info("The sampling buffer limits have changed to: "
                "min %lu max %lu (diag %lu)\n",
                CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
        return 0;
}

#define param_check_sfb_size(name, p) __param_check(name, p, void)
static const struct kernel_param_ops param_ops_sfb_size = {
        .set = param_set_sfb_size,
        .get = param_get_sfb_size,
};
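/*
 * Example (illustrative): together with the core_param() registration at
 * the end of this file, the parameter ops above accept either "min,max"
 * or "max" on the kernel command line, e.g.
 *
 *	cpum_sfb_size=64,1024
 *	cpum_sfb_size=2048
 */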
#define RS_INIT_FAILURE_QSI       0x0001
#define RS_INIT_FAILURE_BSDES     0x0002
#define RS_INIT_FAILURE_ALRT      0x0003
#define RS_INIT_FAILURE_PERF      0x0004
static void __init pr_cpumsf_err(unsigned int reason)
{
        pr_err("Sampling facility support for perf is not available: "
               "reason %#x\n", reason);
}

static int __init init_cpum_sampling_pmu(void)
{
        struct hws_qsi_info_block si;
        int err;

        if (!cpum_sf_avail())
                return -ENODEV;

        memset(&si, 0, sizeof(si));
        if (qsi(&si)) {
                pr_cpumsf_err(RS_INIT_FAILURE_QSI);
                return -ENODEV;
        }

        if (!si.as && !si.ad)
                return -ENODEV;

        if (si.bsdes != sizeof(struct hws_basic_entry)) {
                pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
                return -EINVAL;
        }

        if (si.ad) {
                sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
                /* Sampling of diagnostic data authorized,
                 * install event into attribute list of PMU device.
                 */
                cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
                        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
        }

        sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
        if (!sfdbg) {
                pr_err("Registering for s390dbf failed\n");
                return -ENOMEM;
        }
        debug_register_view(sfdbg, &debug_sprintf_view);

        err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
                                    cpumf_measurement_alert);
        if (err) {
                pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
                debug_unregister(sfdbg);
                goto out;
        }

        err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
        if (err) {
                pr_cpumsf_err(RS_INIT_FAILURE_PERF);
                unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
                                        cpumf_measurement_alert);
                debug_unregister(sfdbg);
                goto out;
        }

        cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
                          s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
        return err;
}

arch_initcall(init_cpum_sampling_pmu);
core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);