// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for the System z CPU-measurement Sampling Facility
 *
 * Copyright IBM Corp. 2013, 2018
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT	"cpum_sf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/pid.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <asm/cpu_mf.h>
#include <asm/irq.h>
#include <asm/debug.h>
#include <asm/timex.h>

/* Minimum number of sample-data-block-tables:
 * At least one table is required for the sampling buffer structure.
 * A single table contains up to 511 pointers to sample-data-blocks.
 */
#define CPUM_SF_MIN_SDBT	1

/* Number of sample-data-blocks per sample-data-block-table (SDBT):
 * A table contains SDB pointers (8 bytes) and one table-link entry
 * that points to the origin of the next SDBT.
 */
#define CPUM_SF_SDB_PER_TABLE	((PAGE_SIZE - 8) / 8)

/* Maximum page offset for an SDBT table-link entry:
 * If this page offset is reached, a table-link entry to the next SDBT
 * must be added.
 */
#define CPUM_SF_SDBT_TL_OFFSET	(CPUM_SF_SDB_PER_TABLE * 8)
static inline int require_table_link(const void *sdbt)
{
	return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
}

/* Minimum and maximum sampling buffer sizes:
 *
 * This number represents the maximum size of the sampling buffer taking
 * the number of sample-data-block-tables into account.  Note that these
 * numbers apply to the basic-sampling function only.
 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
 * the diagnostic-sampling function is active.
 *
 * Sampling buffer size		Buffer characteristics
 * ---------------------------------------------------
 *	 64KB		    ==	  16 pages (4KB per page)
 *				   1 page	for SDB-tables
 *				  15 pages	for SDBs
 *
 *	 32MB		    ==	8192 pages (4KB per page)
 *				  16 pages	for SDB-tables
 *				8176 pages	for SDBs
 */
static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;

struct sf_buffer {
	unsigned long	 *sdbt;	    /* Sample-data-block-table origin */
	/* buffer characteristics (required for buffer increments) */
	unsigned long  num_sdb;	    /* Number of sample-data-blocks */
	unsigned long num_sdbt;	    /* Number of sample-data-block-tables */
	unsigned long	 *tail;	    /* last sample-data-block-table */
};

struct aux_buffer {
	struct sf_buffer sfb;
	unsigned long head;	   /* index of SDB of buffer head */
	unsigned long alert_mark;  /* index of SDB of alert request position */
	unsigned long empty_mark;  /* mark of SDB not marked full */
	unsigned long *sdb_index;  /* SDB address for fast lookup */
	unsigned long *sdbt_index; /* SDBT address for fast lookup */
};

struct cpu_hw_sf {
	/* CPU-measurement sampling information block */
	struct hws_qsi_info_block qsi;
	/* CPU-measurement sampling control block */
	struct hws_lsctl_request_block lsctl;
	struct sf_buffer sfb;	    /* Sampling buffer */
	unsigned int flags;	    /* Status flags */
	struct perf_event *event;   /* Scheduled perf event */
	struct perf_output_handle handle; /* AUX buffer output handle */
};
static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);

/* Debug feature */
static debug_info_t *sfdbg;

/*
 * sf_disable() - Switch off sampling facility
 */
static int sf_disable(void)
{
	struct hws_lsctl_request_block sreq;

	memset(&sreq, 0, sizeof(sreq));
	return lsctl(&sreq);
}

/*
 * sf_buffer_available() - Check for an allocated sampling buffer
 */
static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
{
	return !!cpuhw->sfb.sdbt;
}

/*
 * deallocate sampling facility buffer
 */
static void free_sampling_buffer(struct sf_buffer *sfb)
{
	unsigned long *sdbt, *curr;

	if (!sfb->sdbt)
		return;

	sdbt = sfb->sdbt;
	curr = sdbt;

	/* Free the SDBT after all SDBs are processed... */
	while (1) {
		if (!*curr || !sdbt)
			break;

		/* Process table-link entries */
		if (is_link_entry(curr)) {
			curr = get_next_sdbt(curr);
			if (sdbt)
				free_page((unsigned long) sdbt);

			/* If the origin is reached, sampling buffer is freed */
			if (curr == sfb->sdbt)
				break;
			else
				sdbt = curr;
		} else {
			/* Process SDB pointer */
			if (*curr) {
				free_page(*curr);
				curr++;
			}
		}
	}

	debug_sprintf_event(sfdbg, 5,
			    "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
	memset(sfb, 0, sizeof(*sfb));
}

static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
{
	unsigned long sdb, *trailer;

	/* Allocate and initialize sample-data-block */
	sdb = get_zeroed_page(gfp_flags);
	if (!sdb)
		return -ENOMEM;
	trailer = trailer_entry_ptr(sdb);
	*trailer = SDB_TE_ALERT_REQ_MASK;

	/* Link SDB into the sample-data-block-table */
	*sdbt = sdb;

	return 0;
}

/*
 * realloc_sampling_buffer() - extend sampler memory
 *
 * Allocates new sample-data-blocks and adds them to the specified sampling
 * buffer memory.
 *
 * Important: This modifies the sampling buffer and must be called when the
 *	      sampling facility is disabled.
 *
 * Returns zero on success, non-zero otherwise.
 */
static int realloc_sampling_buffer(struct sf_buffer *sfb,
				   unsigned long num_sdb, gfp_t gfp_flags)
{
	int i, rc;
	unsigned long *new, *tail;

	if (!sfb->sdbt || !sfb->tail)
		return -EINVAL;

	if (!is_link_entry(sfb->tail))
		return -EINVAL;

	/* Append to the existing sampling buffer, overwriting the table-link
	 * register.
	 * The tail variable always points to the "tail" (last and table-link)
	 * entry in an SDB-table.
	 */
	tail = sfb->tail;

	/* Do a sanity check whether the table-link entry points to
	 * the sampling buffer origin.
	 */
	if (sfb->sdbt != get_next_sdbt(tail)) {
		debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
				    "sampling buffer is not linked: origin=%p "
				    "tail=%p\n",
				    (void *) sfb->sdbt, (void *) tail);
		return -EINVAL;
	}

	/* Allocate remaining SDBs */
	rc = 0;
	for (i = 0; i < num_sdb; i++) {
		/* Allocate a new SDB-table if it is full. */
		if (require_table_link(tail)) {
			new = (unsigned long *) get_zeroed_page(gfp_flags);
			if (!new) {
				rc = -ENOMEM;
				break;
			}
			sfb->num_sdbt++;
			/* Link current page to tail of chain */
			*tail = (unsigned long)(void *) new + 1;
			tail = new;
		}

		/* Allocate a new sample-data-block.
		 * If there is not enough memory, stop the realloc process
		 * and simply use what was allocated.  If this is a temporary
		 * issue, a new realloc call (if required) might succeed.
		 */
		rc = alloc_sample_data_block(tail, gfp_flags);
		if (rc)
			break;
		sfb->num_sdb++;
		tail++;
	}

	/* Link sampling buffer to its origin */
	*tail = (unsigned long) sfb->sdbt + 1;
	sfb->tail = tail;

	debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
			    " settings: sdbt=%lu sdb=%lu\n",
			    sfb->num_sdbt, sfb->num_sdb);
	return rc;
}

/*
 * allocate_sampling_buffer() - allocate sampler memory
 *
 * Allocates and initializes a sampling buffer structure using the
 * specified number of sample-data-blocks (SDB).  For each allocation,
 * a 4K page is used.  The number of sample-data-block-tables (SDBT)
 * is calculated from the SDBs.
 * Also set the ALERT_REQ mask in each SDB's trailer.
 *
 * Returns zero on success, non-zero otherwise.
 */
static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
{
	int rc;

	if (sfb->sdbt)
		return -EINVAL;

	/* Allocate the sample-data-block-table origin */
	sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!sfb->sdbt)
		return -ENOMEM;
	sfb->num_sdb = 0;
	sfb->num_sdbt = 1;

	/* Link the table origin to point to itself to prepare for
	 * realloc_sampling_buffer() invocation.
	 */
	sfb->tail = sfb->sdbt;
	*sfb->tail = (unsigned long)(void *) sfb->sdbt + 1;

	/* Allocate requested number of sample-data-blocks */
	rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
	if (rc) {
		free_sampling_buffer(sfb);
		debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
			"realloc_sampling_buffer failed with rc=%i\n", rc);
	} else
		debug_sprintf_event(sfdbg, 4,
			"alloc_sampling_buffer: tear=%p dear=%p\n",
			sfb->sdbt, (void *) *sfb->sdbt);
	return rc;
}

static void sfb_set_limits(unsigned long min, unsigned long max)
{
	struct hws_qsi_info_block si;

	CPUM_SF_MIN_SDB = min;
	CPUM_SF_MAX_SDB = max;

	memset(&si, 0, sizeof(si));
	if (!qsi(&si))
		CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
}

static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
{
	return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
				    : CPUM_SF_MAX_SDB;
}

static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
					struct hw_perf_event *hwc)
{
	if (!sfb->sdbt)
		return SFB_ALLOC_REG(hwc);
	if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
		return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
	return 0;
}

static int sfb_has_pending_allocs(struct sf_buffer *sfb,
				  struct hw_perf_event *hwc)
{
	return sfb_pending_allocs(sfb, hwc) > 0;
}

static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
{
	/* Limit the number of SDBs to not exceed the maximum */
	num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
	if (num)
		SFB_ALLOC_REG(hwc) += num;
}

static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
{
	SFB_ALLOC_REG(hwc) = 0;
	sfb_account_allocs(num, hwc);
}

static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
{
	if (cpuhw->sfb.sdbt)
		free_sampling_buffer(&cpuhw->sfb);
}

static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
{
	unsigned long n_sdb, freq, factor;
	size_t sample_size;

	/* Calculate sampling buffers using 4K pages
	 *
	 *    1. Determine the sample data size which depends on the used
	 *	 sampling functions, for example, basic-sampling or
	 *	 basic-sampling with diagnostic-sampling.
	 *
	 *    2. Use the sampling frequency as input.  The sampling buffer is
	 *	 designed for almost one second.  This can be adjusted through
	 *	 the "factor" variable.
	 *	 In any case, alloc_sampling_buffer() sets the Alert Request
	 *	 Control indicator to trigger a measurement-alert to harvest
	 *	 sample-data-blocks (sdb).
	 *
	 *    3. Compute the number of sample-data-blocks and ensure a minimum
	 *	 of CPUM_SF_MIN_SDB.  Also ensure the upper limit does not
	 *	 exceed a "calculated" maximum.  The symbolic maximum is
The symbolic maximum is 379 * designed for basic-sampling only and needs to be increased if 380 * diagnostic-sampling is active. 381 * See also the remarks for these symbolic constants. 382 * 383 * 4. Compute the number of sample-data-block-tables (SDBT) and 384 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up 385 * to 511 SDBs). 386 */ 387 sample_size = sizeof(struct hws_basic_entry); 388 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)); 389 factor = 1; 390 n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size)); 391 if (n_sdb < CPUM_SF_MIN_SDB) 392 n_sdb = CPUM_SF_MIN_SDB; 393 394 /* If there is already a sampling buffer allocated, it is very likely 395 * that the sampling facility is enabled too. If the event to be 396 * initialized requires a greater sampling buffer, the allocation must 397 * be postponed. Changing the sampling buffer requires the sampling 398 * facility to be in the disabled state. So, account the number of 399 * required SDBs and let cpumsf_pmu_enable() resize the buffer just 400 * before the event is started. 401 */ 402 sfb_init_allocs(n_sdb, hwc); 403 if (sf_buffer_available(cpuhw)) 404 return 0; 405 406 debug_sprintf_event(sfdbg, 3, 407 "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu" 408 " sample_size=%lu cpuhw=%p\n", 409 SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc), 410 sample_size, cpuhw); 411 412 return alloc_sampling_buffer(&cpuhw->sfb, 413 sfb_pending_allocs(&cpuhw->sfb, hwc)); 414 } 415 416 static unsigned long min_percent(unsigned int percent, unsigned long base, 417 unsigned long min) 418 { 419 return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100)); 420 } 421 422 static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base) 423 { 424 /* Use a percentage-based approach to extend the sampling facility 425 * buffer. Accept up to 5% sample data loss. 426 * Vary the extents between 1% to 5% of the current number of 427 * sample-data-blocks. 428 */ 429 if (ratio <= 5) 430 return 0; 431 if (ratio <= 25) 432 return min_percent(1, base, 1); 433 if (ratio <= 50) 434 return min_percent(1, base, 1); 435 if (ratio <= 75) 436 return min_percent(2, base, 2); 437 if (ratio <= 100) 438 return min_percent(3, base, 3); 439 if (ratio <= 250) 440 return min_percent(4, base, 4); 441 442 return min_percent(5, base, 8); 443 } 444 445 static void sfb_account_overflows(struct cpu_hw_sf *cpuhw, 446 struct hw_perf_event *hwc) 447 { 448 unsigned long ratio, num; 449 450 if (!OVERFLOW_REG(hwc)) 451 return; 452 453 /* The sample_overflow contains the average number of sample data 454 * that has been lost because sample-data-blocks were full. 455 * 456 * Calculate the total number of sample data entries that has been 457 * discarded. Then calculate the ratio of lost samples to total samples 458 * per second in percent. 
	ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
			     sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));

	/* Compute number of sample-data-blocks */
	num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
	if (num)
		sfb_account_allocs(num, hwc);

	debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
			    " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
	OVERFLOW_REG(hwc) = 0;
}

/* extend_sampling_buffer() - Extend sampling buffer
 * @sfb:	Sampling buffer structure (for local CPU)
 * @hwc:	Perf event hardware structure
 *
 * Use this function to extend the sampling buffer based on the overflow counter
 * and postponed allocation extents stored in the specified Perf event hardware.
 *
 * Important: This function disables the sampling facility in order to safely
 *	      change the sampling buffer structure.  Do not call this function
 *	      when the PMU is active.
 */
static void extend_sampling_buffer(struct sf_buffer *sfb,
				   struct hw_perf_event *hwc)
{
	unsigned long num, num_old;
	int rc;

	num = sfb_pending_allocs(sfb, hwc);
	if (!num)
		return;
	num_old = sfb->num_sdb;

	/* Disable the sampling facility to reset any states and also
	 * clear pending measurement alerts.
	 */
	sf_disable();

	/* Extend the sampling buffer.
	 * This memory allocation typically happens in an atomic context when
	 * called by perf.  Because this is a reallocation, it is fine if the
	 * new SDB-request cannot be satisfied immediately.
	 */
	rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
	if (rc)
		debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
				    "failed with rc=%i\n", rc);

	if (sfb_has_pending_allocs(sfb, hwc))
		debug_sprintf_event(sfdbg, 5, "sfb: extend: "
				    "req=%lu alloc=%lu remaining=%lu\n",
				    num, sfb->num_sdb - num_old,
				    sfb_pending_allocs(sfb, hwc));
}

/* Number of perf events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

#define PMC_INIT      0
#define PMC_RELEASE   1
#define PMC_FAILURE   2
static void setup_pmc_cpu(void *flags)
{
	int err;
	struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);

	err = 0;
	switch (*((int *) flags)) {
	case PMC_INIT:
		memset(cpusf, 0, sizeof(*cpusf));
		err = qsi(&cpusf->qsi);
		if (err)
			break;
		cpusf->flags |= PMU_F_RESERVED;
		err = sf_disable();
		if (err)
			pr_err("Switching off the sampling facility failed "
			       "with rc=%i\n", err);
		debug_sprintf_event(sfdbg, 5,
				    "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
		break;
	case PMC_RELEASE:
		cpusf->flags &= ~PMU_F_RESERVED;
		err = sf_disable();
		if (err) {
			pr_err("Switching off the sampling facility failed "
			       "with rc=%i\n", err);
		} else
			deallocate_buffers(cpusf);
		debug_sprintf_event(sfdbg, 5,
				    "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
		break;
	}
	if (err)
		*((int *) flags) |= PMC_FAILURE;
}

static void release_pmc_hardware(void)
{
	int flags = PMC_RELEASE;

	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
	on_each_cpu(setup_pmc_cpu, &flags, 1);
}

static int reserve_pmc_hardware(void)
{
	int flags = PMC_INIT;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	if (flags & PMC_FAILURE) {
		release_pmc_hardware();
		return -ENODEV;
	}
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Release PMC if this is the last perf event */
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static void hw_init_period(struct hw_perf_event *hwc, u64 period)
{
	hwc->sample_period = period;
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);
}

static void hw_reset_registers(struct hw_perf_event *hwc,
			       unsigned long *sdbt_origin)
{
	/* (Re)set to first sample-data-block-table */
	TEAR_REG(hwc) = (unsigned long) sdbt_origin;
}

static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
				   unsigned long rate)
{
	return clamp_t(unsigned long, rate,
		       si->min_sampl_rate, si->max_sampl_rate);
}

static u32 cpumsf_pid_type(struct perf_event *event,
			   u32 pid, enum pid_type type)
{
	struct task_struct *tsk;

	/* Idle process */
	if (!pid)
		goto out;

	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	pid = -1;
	if (tsk) {
		/*
		 * Only top level events contain the pid namespace in which
		 * they are created.
		 */
		if (event->parent)
			event = event->parent;
		pid = __task_pid_nr_ns(tsk, type, event->ns);
		/*
		 * See also 1d953111b648
		 * "perf/core: Don't report zero PIDs for exiting tasks".
		 */
		if (!pid && !pid_alive(tsk))
			pid = -1;
	}
out:
	return pid;
}

static void cpumsf_output_event_pid(struct perf_event *event,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	u32 pid;
	struct perf_event_header header;
	struct perf_output_handle handle;

	/*
	 * Obtain the PID from the basic-sampling data entry and
	 * correct the data->tid_entry.pid value.
	 */
	pid = data->tid_entry.pid;

	/* Protect callchain buffers, tasks */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);
	if (perf_output_begin(&handle, event, header.size))
		goto out;

	/* Update the process ID (see also kernel/events/core.c) */
	data->tid_entry.pid = cpumsf_pid_type(event, pid, PIDTYPE_TGID);
	data->tid_entry.tid = cpumsf_pid_type(event, pid, PIDTYPE_PID);

	perf_output_sample(&handle, &header, data, event);
	perf_output_end(&handle);
out:
	rcu_read_unlock();
}

static unsigned long getrate(bool freq, unsigned long sample,
			     struct hws_qsi_info_block *si)
{
	unsigned long rate;

	if (freq) {
		rate = freq_to_sample_rate(si, sample);
		rate = hw_limit_rate(si, rate);
	} else {
		/* The min/max sampling rates specify the valid range
		 * of sample periods.  If the specified sample period is
		 * out of range, limit the period to the range boundary.
		 */
		rate = hw_limit_rate(si, sample);

		/* The perf core maintains a maximum sample rate that is
		 * configurable through the sysctl interface.  Ensure the
		 * sampling rate does not exceed this value.  This also helps
		 * to avoid throttling when pushing samples with
		 * perf_event_overflow().
		 */
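		/*
		 * Illustrative example (values assumed): following the
		 * interval formula described at __hw_perf_event_init_rate(),
		 * a cpu_speed of 5000 and an interval of 1000000 cycles
		 * correspond to roughly 5000 samples per second; if the
		 * sysctl limit (sysctl_perf_event_sample_rate) were set
		 * below that, the check below rejects the period (rate = 0).
		 */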
		if (sample_rate_to_freq(si, rate) >
		    sysctl_perf_event_sample_rate) {
			debug_sprintf_event(sfdbg, 1,
					    "Sampling rate exceeds maximum "
					    "perf sample rate\n");
			rate = 0;
		}
	}
	return rate;
}

/* The sampling information (si) contains information about the
 * min/max sampling intervals and the CPU speed.  So calculate the
 * correct sampling interval and avoid the whole period adjust
 * feedback loop.
 *
 * Since the CPU Measurement sampling facility cannot handle frequency,
 * calculate the sampling interval when frequency is specified using
 * this formula:
 *	interval := cpu_speed * 1000000 / sample_freq
 *
 * Returns errno on bad input and zero on success with parameter interval
 * set to the correct sampling rate.
 *
 * Note: This function turns off the freq bit to avoid calling function
 * perf_adjust_period().  This causes frequency adjustment in the common
 * code part which causes tremendous variations in the counter values.
 */
static int __hw_perf_event_init_rate(struct perf_event *event,
				     struct hws_qsi_info_block *si)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long rate;

	if (attr->freq) {
		if (!attr->sample_freq)
			return -EINVAL;
		rate = getrate(attr->freq, attr->sample_freq, si);
		attr->freq = 0;		/* Don't call perf_adjust_period() */
		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FREQ_MODE;
	} else {
		rate = getrate(attr->freq, attr->sample_period, si);
		if (!rate)
			return -EINVAL;
	}
	attr->sample_period = rate;
	SAMPL_RATE(hwc) = rate;
	hw_init_period(hwc, SAMPL_RATE(hwc));
	debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
			    "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
			    event->attr.sample_period, event->attr.freq,
			    SAMPLE_FREQ_MODE(hwc));
	return 0;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct cpu_hw_sf *cpuhw;
	struct hws_qsi_info_block si;
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int cpu, err;

	/* Reserve CPU-measurement sampling facility */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	if (err)
		goto out;

	/* Access per-CPU sampling information (query sampling info) */
	/*
	 * The event->cpu value can be -1 to count on every CPU, for example,
	 * when attaching to a task.  If this is specified, use the query
	 * sampling info from the current CPU, otherwise use event->cpu to
	 * retrieve the per-CPU information.
	 * Later, cpuhw indicates whether to allocate sampling buffers for a
	 * particular CPU (cpuhw != NULL) or each online CPU (cpuhw == NULL).
	 */
	memset(&si, 0, sizeof(si));
	cpuhw = NULL;
	if (event->cpu == -1)
		qsi(&si);
	else {
		/* Event is pinned to a particular CPU, retrieve the per-CPU
		 * sampling structure for accessing the CPU-specific QSI.
		 */
		cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
		si = cpuhw->qsi;
	}

	/* Check sampling facility authorization and, if not authorized,
	 * fall back to other PMUs.  It is safe to check any CPU because
	 * the authorization is identical for all configured CPUs.
	 */
	if (!si.as) {
		err = -ENOENT;
		goto out;
	}

	/* Always enable basic sampling */
	SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;

	/* Check if diagnostic sampling is requested.  Deny if the required
	 * sampling authorization is missing.
	 */
	if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
		if (!si.ad) {
			err = -EPERM;
			goto out;
		}
		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
	}

	/* Check and set other sampling flags */
	if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
		SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;

	err = __hw_perf_event_init_rate(event, &si);
	if (err)
		goto out;

	/* Initialize sample data overflow accounting */
	hwc->extra_reg.reg = REG_OVERFLOW;
	OVERFLOW_REG(hwc) = 0;

	/* Use AUX buffer.  No need to allocate it by ourself */
	if (attr->config == PERF_EVENT_CPUM_SF_DIAG)
		return 0;

	/* Allocate the per-CPU sampling buffer using the CPU information
	 * from the event.  If the event is not pinned to a particular
	 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
	 * buffers for each online CPU.
	 */
	if (cpuhw)
		/* Event is pinned to a particular CPU */
		err = allocate_buffers(cpuhw, hwc);
	else {
		/* Event is not pinned, allocate sampling buffer on
		 * each online CPU
		 */
		for_each_online_cpu(cpu) {
			cpuhw = &per_cpu(cpu_hw_sf, cpu);
			err = allocate_buffers(cpuhw, hwc);
			if (err)
				break;
		}
	}

	/* If PID/TID sampling is active, replace the default overflow
	 * handler to extract and resolve the PIDs from the basic-sampling
	 * data entries.
	 */
	if (event->attr.sample_type & PERF_SAMPLE_TID)
		if (is_default_overflow_handler(event))
			event->overflow_handler = cpumsf_output_event_pid;
out:
	return err;
}

static int cpumsf_pmu_event_init(struct perf_event *event)
{
	int err;

	/* No support for taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
		if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
		    (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
			return -ENOENT;
		break;
	case PERF_TYPE_HARDWARE:
		/* Support sampling of CPU cycles in addition to the
		 * counter facility.  However, the counter facility
		 * is more precise and, hence, restrict this PMU to
		 * sampling events only.
		 */
		if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
			return -ENOENT;
		if (!is_sampling_event(event))
			return -ENOENT;
		break;
	default:
		return -ENOENT;
	}

	/* Check online status of the CPU to which the event is pinned */
	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	/* Force reset of idle/hv excludes regardless of what the
	 * user requested.
	 */
	if (event->attr.exclude_hv)
		event->attr.exclude_hv = 0;
	if (event->attr.exclude_idle)
		event->attr.exclude_idle = 0;

	err = __hw_perf_event_init(event);
	if (unlikely(err))
		if (event->destroy)
			event->destroy(event);
	return err;
}

static void cpumsf_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
	struct hw_perf_event *hwc;
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	if (cpuhw->flags & PMU_F_ERR_MASK)
		return;

	/* Check whether to extend the sampling buffer.
	 *
	 * Two conditions trigger an increase of the sampling buffer for a
	 * perf event:
	 *    1. Postponed buffer allocations from the event initialization.
	 *    2. Sampling overflows that contribute to pending allocations.
	 *
	 * Note that the extend_sampling_buffer() function disables the sampling
	 * facility, but it can be fully re-enabled using sampling controls that
	 * have been saved in cpumsf_pmu_disable().
	 */
	if (cpuhw->event) {
		hwc = &cpuhw->event->hw;
		if (!(SAMPL_DIAG_MODE(hwc))) {
			/*
			 * Account number of overflow-designated
			 * buffer extents
			 */
			sfb_account_overflows(cpuhw, hwc);
			if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
				extend_sampling_buffer(&cpuhw->sfb, hwc);
		}
		/* Rate may be adjusted with ioctl() */
		cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw);
	}

	/* (Re)enable the PMU and sampling facility */
	cpuhw->flags |= PMU_F_ENABLED;
	barrier();

	err = lsctl(&cpuhw->lsctl);
	if (err) {
		cpuhw->flags &= ~PMU_F_ENABLED;
		pr_err("Loading sampling controls failed: op=%i err=%i\n",
		       1, err);
		return;
	}

	/* Load current program parameter */
	lpp(&S390_lowcore.lpp);

	debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
			    "interval:%lx tear=%p dear=%p\n",
			    cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
			    cpuhw->lsctl.cd, cpuhw->lsctl.interval,
			    (void *) cpuhw->lsctl.tear,
			    (void *) cpuhw->lsctl.dear);
}

static void cpumsf_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
	struct hws_lsctl_request_block inactive;
	struct hws_qsi_info_block si;
	int err;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	if (cpuhw->flags & PMU_F_ERR_MASK)
		return;

	/* Switch off sampling activation control */
	inactive = cpuhw->lsctl;
	inactive.cs = 0;
	inactive.cd = 0;

	err = lsctl(&inactive);
	if (err) {
		pr_err("Loading sampling controls failed: op=%i err=%i\n",
		       2, err);
		return;
	}

	/* Save state of TEAR and DEAR register contents */
	if (!qsi(&si)) {
		/* TEAR/DEAR values are valid only if the sampling facility is
		 * enabled.  Note that cpumsf_pmu_disable() might be called even
		 * for a disabled sampling facility because cpumsf_pmu_enable()
		 * controls the enable/disable state.
		 */
		if (si.es) {
			cpuhw->lsctl.tear = si.tear;
			cpuhw->lsctl.dear = si.dear;
		}
	} else
		debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
				    "qsi() failed with err=%i\n", err);

	cpuhw->flags &= ~PMU_F_ENABLED;
}

/* perf_exclude_event() - Filter event
 * @event:	The perf event
 * @regs:	pt_regs structure
 * @sde_regs:	Sample-data-entry (sde) regs structure
 *
 * Filter perf events according to their exclude specification.
 *
 * Return non-zero if the event shall be excluded.
 */
static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
			      struct perf_sf_sde_regs *sde_regs)
{
	if (event->attr.exclude_user && user_mode(regs))
		return 1;
	if (event->attr.exclude_kernel && !user_mode(regs))
		return 1;
	if (event->attr.exclude_guest && sde_regs->in_guest)
		return 1;
	if (event->attr.exclude_host && !sde_regs->in_guest)
		return 1;
	return 0;
}

/* perf_push_sample() - Push samples to perf
 * @event:	The perf event
 * @sample:	Hardware sample data
 *
 * Use the hardware sample data to create a perf event sample.  The sample
 * is then pushed to the event subsystem and the function checks for
 * possible event overflows.  If an event overflow occurs, the PMU is
 * stopped.
 *
 * Return non-zero if an event overflow occurred.
 */
static int perf_push_sample(struct perf_event *event,
			    struct hws_basic_entry *basic)
{
	int overflow;
	struct pt_regs regs;
	struct perf_sf_sde_regs *sde_regs;
	struct perf_sample_data data;

	/* Setup perf sample */
	perf_sample_data_init(&data, 0, event->hw.last_period);

	/* Setup pt_regs to look like a CPU-measurement external interrupt
	 * using the Program Request Alert code.  The regs.int_parm_long
	 * field, which is otherwise unused, contains additional
	 * sample-data-entry related indicators.
	 */
	memset(&regs, 0, sizeof(regs));
	regs.int_code = 0x1407;
	regs.int_parm = CPU_MF_INT_SF_PRA;
	sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;

	psw_bits(regs.psw).ia	= basic->ia;
	psw_bits(regs.psw).dat	= basic->T;
	psw_bits(regs.psw).wait = basic->W;
	psw_bits(regs.psw).pstate = basic->P;
	psw_bits(regs.psw).as	= basic->AS;

	/*
	 * Use the hardware provided configuration level to decide if the
	 * sample belongs to a guest or host.  If that is not available,
	 * fall back to the following heuristics:
	 * A non-zero guest program parameter always indicates a guest
	 * sample.  Some early samples or samples from guests without
	 * lpp usage would be misaccounted to the host.  We use the asn
	 * value as an addon heuristic to detect most of these guest samples.
	 * If the value differs from 0xffff (the host value), we assume it
	 * to be a KVM guest.
	 */
	switch (basic->CL) {
	case 1: /* logical partition */
		sde_regs->in_guest = 0;
		break;
	case 2: /* virtual machine */
		sde_regs->in_guest = 1;
		break;
	default: /* old machine, use heuristics */
		if (basic->gpp || basic->prim_asn != 0xffff)
			sde_regs->in_guest = 1;
		break;
	}

	/*
	 * Store the PID value from the sample-data-entry to be
	 * processed and resolved by cpumsf_output_event_pid().
	 */
	data.tid_entry.pid = basic->hpp & LPP_PID_MASK;

	overflow = 0;
	if (perf_exclude_event(event, &regs, sde_regs))
		goto out;
	if (perf_event_overflow(event, &data, &regs)) {
		overflow = 1;
		event->pmu->stop(event, 0);
	}
	perf_event_update_userpage(event);
out:
	return overflow;
}

static void perf_event_count_update(struct perf_event *event, u64 count)
{
	local64_add(count, &event->count);
}

static void debug_sample_entry(struct hws_basic_entry *sample,
			       struct hws_trailer_entry *te)
{
	debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
			    "sampling data entry: te->f=%i basic.def=%04x "
			    "(%p)\n",
			    te->f, sample->def, sample);
}

/* hw_collect_samples() - Walk through a sample-data-block and collect samples
 * @event:	The perf event
 * @sdbt:	Sample-data-block table
 * @overflow:	Event overflow counter
 *
 * Walks through a sample-data-block and collects sampling data entries that are
 * then pushed to the perf event subsystem.  Depending on the sampling function,
 * there can be either basic-sampling or combined-sampling data entries.  A
 * combined-sampling data entry consists of a basic- and a diagnostic-sampling
 * data entry.  The sampling function is determined by the flags in the perf
 * event hardware structure.  The function always works with a combined-sampling
 * data entry but ignores the diagnostic portion if it is not available.
 *
 * Note that the implementation focuses on basic-sampling data entries and, if
 * such an entry is not valid, the entire combined-sampling data entry is
 * ignored.
 *
 * The overflow variable counts the number of samples that have been discarded
 * due to a perf event overflow.
 */
static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
			       unsigned long long *overflow)
{
	struct hws_trailer_entry *te;
	struct hws_basic_entry *sample;

	te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
	sample = (struct hws_basic_entry *) *sdbt;
	while ((unsigned long *) sample < (unsigned long *) te) {
		/* Check for an empty sample */
		if (!sample->def)
			break;

		/* Update perf event period */
		perf_event_count_update(event, SAMPL_RATE(&event->hw));

		/* Check whether sample is valid */
		if (sample->def == 0x0001) {
			/* If an event overflow occurred, the PMU is stopped to
			 * throttle event delivery.  Remaining sample data is
			 * discarded.
			 */
			if (!*overflow) {
				/* Check whether sample is consistent */
				if (sample->I == 0 && sample->W == 0) {
					/* Deliver sample data to perf */
					*overflow = perf_push_sample(event,
								     sample);
				}
			} else
				/* Count discarded samples */
				*overflow += 1;
		} else {
			debug_sample_entry(sample, te);
			/* Sample slot is not yet written or other record.
			 *
			 * This condition can occur if the buffer was reused
			 * from a combined basic- and diagnostic-sampling.
			 * If only basic-sampling is then active, entries are
			 * written into the larger diagnostic entries.
			 * This is typically the case for sample-data-blocks
			 * that are not full.  Stop processing if the first
			 * invalid format was detected.
			 */
			if (!te->f)
				break;
		}

		/* Reset sample slot and advance to next sample */
		sample->def = 0;
		sample++;
	}
}

/* hw_perf_event_update() - Process sampling buffer
 * @event:	The perf event
 * @flush_all:	Flag to also flush partially filled sample-data-blocks
 *
 * Processes the sampling buffer and creates perf event samples.
 * The sampling buffer position is retrieved and saved in the TEAR_REG
 * register of the specified perf event.
 *
 * Only full sample-data-blocks are processed.  Specify the flush_all flag
 * to also walk through partially filled sample-data-blocks.  It is ignored
 * if PERF_CPUM_SF_FULL_BLOCKS is set.  The PERF_CPUM_SF_FULL_BLOCKS flag
 * enforces the processing of full sample-data-blocks only (trailer entries
 * with the block-full-indicator bit set).
 */
static void hw_perf_event_update(struct perf_event *event, int flush_all)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hws_trailer_entry *te;
	unsigned long *sdbt;
	unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
	int done;

	/*
	 * AUX buffer is used when in diagnostic sampling mode.
	 * No perf events/samples are created.
	 */
	if (SAMPL_DIAG_MODE(&event->hw))
		return;

	if (flush_all && SDB_FULL_BLOCKS(hwc))
		flush_all = 0;

	sdbt = (unsigned long *) TEAR_REG(hwc);
	done = event_overflow = sampl_overflow = num_sdb = 0;
	while (!done) {
		/* Get the trailer entry of the sample-data-block */
		te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);

		/* Leave loop if no more work to do (block full indicator) */
		if (!te->f) {
			done = 1;
			if (!flush_all)
				break;
		}

		/* Check the sample overflow count */
		if (te->overflow)
			/* Account sample overflows and, if a particular limit
			 * is reached, extend the sampling buffer.
			 * For details, see sfb_account_overflows().
			 */
			sampl_overflow += te->overflow;

		/* Timestamps are valid for full sample-data-blocks only */
		debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
				    "overflow=%llu timestamp=%#llx\n",
				    sdbt, te->overflow,
				    (te->f) ? trailer_timestamp(te) : 0ULL);

		/* Collect all samples from a single sample-data-block and
		 * flag if an (perf) event overflow happened.  If so, the PMU
		 * is stopped and remaining samples will be discarded.
		 */
		hw_collect_samples(event, sdbt, &event_overflow);
		num_sdb++;

		/* Reset trailer (using compare-double-and-swap) */
		do {
			te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
			te_flags |= SDB_TE_ALERT_REQ_MASK;
		} while (!cmpxchg_double(&te->flags, &te->overflow,
					 te->flags, te->overflow,
					 te_flags, 0ULL));

		/* Advance to next sample-data-block */
		sdbt++;
		if (is_link_entry(sdbt))
			sdbt = get_next_sdbt(sdbt);

		/* Update event hardware registers */
		TEAR_REG(hwc) = (unsigned long) sdbt;

		/* Stop processing sample-data if all samples of the current
		 * sample-data-block were flushed even if it was not full.
		 */
		if (flush_all && done)
			break;

		/* If an event overflow happened, discard samples by
		 * processing any remaining sample-data-blocks.
		 */
		if (event_overflow)
			flush_all = 1;
	}

	/* Account sample overflows in the event hardware structure */
	if (sampl_overflow)
		OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
						 sampl_overflow, 1 + num_sdb);
	if (sampl_overflow || event_overflow)
		debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
				    "overflow stats: sample=%llu event=%llu\n",
				    sampl_overflow, event_overflow);
}

#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
#define AUX_SDB_NUM(aux, start, end) (end >= start ? end - start + 1 : 0)
#define AUX_SDB_NUM_ALERT(aux) AUX_SDB_NUM(aux, aux->head, aux->alert_mark)
#define AUX_SDB_NUM_EMPTY(aux) AUX_SDB_NUM(aux, aux->head, aux->empty_mark)

/*
 * Get trailer entry by index of SDB.
 */
static struct hws_trailer_entry *aux_sdb_trailer(struct aux_buffer *aux,
						 unsigned long index)
{
	unsigned long sdb;

	index = AUX_SDB_INDEX(aux, index);
	sdb = aux->sdb_index[index];
	return (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
}

/*
 * Finish sampling on the CPU.  Called by cpumsf_pmu_del() with pmu
 * disabled.  Collect the full SDBs in the AUX buffer which have not yet
 * reached the alert indicator and ignore the SDBs which are not full.
 *
 * 1. Scan SDBs to see how much data is there and consume them.
 * 2. Remove the alert indicator in the buffer.
 */
static void aux_output_end(struct perf_output_handle *handle)
{
	unsigned long i, range_scan, idx;
	struct aux_buffer *aux;
	struct hws_trailer_entry *te;

	aux = perf_get_aux(handle);
	if (!aux)
		return;

	range_scan = AUX_SDB_NUM_ALERT(aux);
	for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
		te = aux_sdb_trailer(aux, idx);
		if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
			break;
	}
	/* i is num of SDBs which are full */
	perf_aux_output_end(handle, i << PAGE_SHIFT);

	/* Remove alert indicators in the buffer */
	te = aux_sdb_trailer(aux, aux->alert_mark);
	te->flags &= ~SDB_TE_ALERT_REQ_MASK;

	debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
}

/*
 * Start sampling on the CPU.  Called by cpumsf_pmu_add() when an event
 * is first added to the CPU or rescheduled again to the CPU.  It is called
 * with pmu disabled.
 *
 * 1. Reset the trailer of SDBs to get ready for new data.
 * 2. Tell the hardware where to put the data by resetting the SDBs buffer
 *    head (tear/dear).
 */
static int aux_output_begin(struct perf_output_handle *handle,
			    struct aux_buffer *aux,
			    struct cpu_hw_sf *cpuhw)
{
	unsigned long range;
	unsigned long i, range_scan, idx;
	unsigned long head, base, offset;
	struct hws_trailer_entry *te;

	if (WARN_ON_ONCE(handle->head & ~PAGE_MASK))
		return -EINVAL;

	aux->head = handle->head >> PAGE_SHIFT;
	range = (handle->size + 1) >> PAGE_SHIFT;
	if (range <= 1)
		return -ENOMEM;

	/*
	 * SDBs between aux->head and aux->empty_mark are already ready
	 * for new data.  range_scan is the number of SDBs not within them.
	 */
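	/*
	 * Illustrative example (numbers assumed): with aux->head = 0 and
	 * range = 16, the loop below resets the trailers of any non-empty
	 * SDBs, empty_mark ends up at SDB 15 and the alert indicator is
	 * placed on SDB 7 (head + range / 2 - 1).
	 */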
	if (range > AUX_SDB_NUM_EMPTY(aux)) {
		range_scan = range - AUX_SDB_NUM_EMPTY(aux);
		idx = aux->empty_mark + 1;
		for (i = 0; i < range_scan; i++, idx++) {
			te = aux_sdb_trailer(aux, idx);
			te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
			te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
			te->overflow = 0;
		}
		/* Save the position of empty SDBs */
		aux->empty_mark = aux->head + range - 1;
	}

	/* Set alert indicator */
	aux->alert_mark = aux->head + range/2 - 1;
	te = aux_sdb_trailer(aux, aux->alert_mark);
	te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;

	/* Reset hardware buffer head */
	head = AUX_SDB_INDEX(aux, aux->head);
	base = aux->sdbt_index[head / CPUM_SF_SDB_PER_TABLE];
	offset = head % CPUM_SF_SDB_PER_TABLE;
	cpuhw->lsctl.tear = base + offset * sizeof(unsigned long);
	cpuhw->lsctl.dear = aux->sdb_index[head];

	debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
			    "head->alert_mark->empty_mark (num_alert, range)"
			    "[%lx -> %lx -> %lx] (%lx, %lx) "
			    "tear index %lx, tear %lx dear %lx\n",
			    aux->head, aux->alert_mark, aux->empty_mark,
			    AUX_SDB_NUM_ALERT(aux), range,
			    head / CPUM_SF_SDB_PER_TABLE,
			    cpuhw->lsctl.tear,
			    cpuhw->lsctl.dear);

	return 0;
}

/*
 * Set alert indicator on SDB at index @alert_index while sampler is running.
 *
 * Return true on success.
 * Return false if the full indicator is already set by the hardware sampler.
 */
static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
			  unsigned long long *overflow)
{
	unsigned long long orig_overflow, orig_flags, new_flags;
	struct hws_trailer_entry *te;

	te = aux_sdb_trailer(aux, alert_index);
	do {
		orig_flags = te->flags;
		orig_overflow = te->overflow;
		*overflow = orig_overflow;
		if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
			/*
			 * SDB is already set by hardware.
			 * Abort and try to set somewhere
			 * behind.
			 */
			return false;
		}
		new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
	} while (!cmpxchg_double(&te->flags, &te->overflow,
				 orig_flags, orig_overflow,
				 new_flags, 0ULL));
	return true;
}

/*
 * aux_reset_buffer() - Scan and setup SDBs for new samples
 * @aux:	The AUX buffer to set
 * @range:	The range of SDBs to scan, starting from aux->head
 * @overflow:	Set to overflow count
 *
 * Set the alert indicator on the SDB at index aux->alert_mark.  If this SDB is
 * marked as empty, check if it is already set full by the hardware sampler.
 * If yes, that means new data is already there before we can set an alert
 * indicator.  The caller should then try to set the alert indicator to some
 * position behind.
 *
 * Scan the SDBs in the AUX buffer from behind aux->empty_mark.  They were used
 * previously and have already been consumed by user space.  Reset these SDBs
 * (clear full indicator and alert indicator) for new data.
 * If aux->alert_mark falls in this area, just set it.  The overflow count is
 * recorded while scanning.
 *
 * SDBs between aux->head and aux->empty_mark were already reset last time and
 * are ready for new samples, so scanning this area can be skipped.
 *
 * Return true if the alert indicator is set successfully and false if not.
 */
static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
			     unsigned long long *overflow)
{
	unsigned long long orig_overflow, orig_flags, new_flags;
	unsigned long i, range_scan, idx;
	struct hws_trailer_entry *te;

	if (range <= AUX_SDB_NUM_EMPTY(aux))
		/*
		 * No need to scan.  All SDBs in range are marked as empty.
		 * Just set alert indicator.  Should check race with hardware
		 * sampler.
		 */
		return aux_set_alert(aux, aux->alert_mark, overflow);

	if (aux->alert_mark <= aux->empty_mark)
		/*
		 * Set alert indicator on empty SDB.  Should check race
		 * with hardware sampler.
		 */
		if (!aux_set_alert(aux, aux->alert_mark, overflow))
			return false;

	/*
	 * Scan the SDBs to clear full and alert indicator used previously.
	 * Start scanning from one SDB behind empty_mark.  If the new alert
	 * indicator falls into this range, set it.
	 */
	range_scan = range - AUX_SDB_NUM_EMPTY(aux);
	idx = aux->empty_mark + 1;
	for (i = 0; i < range_scan; i++, idx++) {
		te = aux_sdb_trailer(aux, idx);
		do {
			orig_flags = te->flags;
			orig_overflow = te->overflow;
			new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
			if (idx == aux->alert_mark)
				new_flags |= SDB_TE_ALERT_REQ_MASK;
			else
				new_flags &= ~SDB_TE_ALERT_REQ_MASK;
		} while (!cmpxchg_double(&te->flags, &te->overflow,
					 orig_flags, orig_overflow,
					 new_flags, 0ULL));
		*overflow += orig_overflow;
	}

	/* Update empty_mark to new position */
	aux->empty_mark = aux->head + range - 1;

	return true;
}

/*
 * Measurement alert handler for diagnostic mode sampling.
 */
static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
{
	struct aux_buffer *aux;
	int done = 0;
	unsigned long range = 0, size;
	unsigned long long overflow = 0;
	struct perf_output_handle *handle = &cpuhw->handle;
	unsigned long num_sdb;

	aux = perf_get_aux(handle);
	if (WARN_ON_ONCE(!aux))
		return;

	/* Inform user space new data arrived */
	size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
	perf_aux_output_end(handle, size);
	num_sdb = aux->sfb.num_sdb;

	while (!done) {
		/* Get an output handle */
		aux = perf_aux_output_begin(handle, cpuhw->event);
		if (handle->size == 0) {
			pr_err("The AUX buffer with %lu pages for the "
			       "diagnostic-sampling mode is full\n",
			       num_sdb);
			debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n");
			break;
		}
		if (WARN_ON_ONCE(!aux))
			return;

		/* Update head and alert_mark to new position */
		aux->head = handle->head >> PAGE_SHIFT;
		range = (handle->size + 1) >> PAGE_SHIFT;
		if (range == 1)
			aux->alert_mark = aux->head;
		else
			aux->alert_mark = aux->head + range/2 - 1;

		if (aux_reset_buffer(aux, range, &overflow)) {
			if (!overflow) {
				done = 1;
				break;
			}
			size = range << PAGE_SHIFT;
			perf_aux_output_end(&cpuhw->handle, size);
			pr_err("Sample data caused the AUX buffer with %lu "
			       "pages to overflow\n", num_sdb);
			debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
					    "overflow %llx\n",
					    aux->head, range, overflow);
		} else {
			size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
			perf_aux_output_end(&cpuhw->handle, size);
			debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
					    "already full, try another\n",
					    aux->head, aux->alert_mark);
		}
	}

	if (done)
		debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
				    "[%lx -> %lx -> %lx] (%lx, %lx)\n",
				    aux->head, aux->alert_mark, aux->empty_mark,
				    AUX_SDB_NUM_ALERT(aux), range);
}

/*
 * Callback when freeing AUX buffers.
 */
static void aux_buffer_free(void *data)
{
	struct aux_buffer *aux = data;
	unsigned long i, num_sdbt;

	if (!aux)
		return;

	/* Free SDBT.  SDB is freed by the caller */
	num_sdbt = aux->sfb.num_sdbt;
	for (i = 0; i < num_sdbt; i++)
		free_page(aux->sdbt_index[i]);

	kfree(aux->sdbt_index);
	kfree(aux->sdb_index);
	kfree(aux);

	debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free "
			    "%lu SDBTs\n", num_sdbt);
}

static void aux_sdb_init(unsigned long sdb)
{
	struct hws_trailer_entry *te;

	te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);

	/* Save clock base */
	te->clock_base = 1;
	memcpy(&te->progusage2, &tod_clock_base[1], 8);
}

/*
 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
 * @event:	Event the buffer is setup for, event->cpu == -1 means current
 * @pages:	Array of pointers to buffer pages passed from perf core
 * @nr_pages:	Total pages
 * @snapshot:	Flag for snapshot mode
 *
 * This is the callback when an event is set up to use the AUX buffer.  The
 * perf tool can trigger it by an additional mmap() call on the event.  Unlike
 * the buffer for basic samples, the AUX buffer belongs to the event.  It is
 * scheduled with the task among online CPUs when it is a per-thread event.
 *
 * Return the private AUX buffer structure on success or NULL on failure.
 */
static void *aux_buffer_setup(struct perf_event *event, void **pages,
			      int nr_pages, bool snapshot)
{
	struct sf_buffer *sfb;
	struct aux_buffer *aux;
	unsigned long *new, *tail;
	int i, n_sdbt;

	if (!nr_pages || !pages)
		return NULL;

	if (nr_pages > CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
		pr_err("AUX buffer size (%i pages) is larger than the "
		       "maximum sampling buffer limit\n",
		       nr_pages);
		return NULL;
	} else if (nr_pages < CPUM_SF_MIN_SDB * CPUM_SF_SDB_DIAG_FACTOR) {
		pr_err("AUX buffer size (%i pages) is less than the "
		       "minimum sampling buffer limit\n",
		       nr_pages);
		return NULL;
	}

	/* Allocate aux_buffer struct for the event */
	aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
	if (!aux)
		goto no_aux;
	sfb = &aux->sfb;

	/* Allocate sdbt_index for fast reference */
	n_sdbt = (nr_pages + CPUM_SF_SDB_PER_TABLE - 1) / CPUM_SF_SDB_PER_TABLE;
	aux->sdbt_index = kmalloc_array(n_sdbt, sizeof(void *), GFP_KERNEL);
	if (!aux->sdbt_index)
		goto no_sdbt_index;

	/* Allocate sdb_index for fast reference */
	aux->sdb_index = kmalloc_array(nr_pages, sizeof(void *), GFP_KERNEL);
	if (!aux->sdb_index)
		goto no_sdb_index;

	/* Allocate the first SDBT */
	sfb->num_sdbt = 0;
	sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!sfb->sdbt)
		goto no_sdbt;
	aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)sfb->sdbt;
	tail = sfb->tail = sfb->sdbt;

	/*
	 * Link the provided pages of the AUX buffer to the SDBT.
	 * Allocate an SDBT if needed.
	 */
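	/*
	 * Illustrative example (sizes assumed): with 4KB pages an SDBT holds
	 * 511 SDB pointers, so nr_pages = 1022 AUX pages fill the first SDBT,
	 * chain in one more SDBT through a table-link entry and finally link
	 * the last entry back to the first SDBT.
	 */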
	for (i = 0; i < nr_pages; i++, tail++) {
		if (require_table_link(tail)) {
			new = (unsigned long *) get_zeroed_page(GFP_KERNEL);
			if (!new)
				goto no_sdbt;
			aux->sdbt_index[sfb->num_sdbt++] = (unsigned long)new;
			/* Link current page to tail of chain */
			*tail = (unsigned long)(void *) new + 1;
			tail = new;
		}
		/* Tail is the entry in a SDBT */
		*tail = (unsigned long)pages[i];
		aux->sdb_index[i] = (unsigned long)pages[i];
		aux_sdb_init((unsigned long)pages[i]);
	}
	sfb->num_sdb = nr_pages;

	/* Link the last entry in the SDBT to the first SDBT */
	*tail = (unsigned long) sfb->sdbt + 1;
	sfb->tail = tail;

	/*
	 * Initially, all SDBs are zeroed.  Mark them as empty.
	 * So there is no need to clear the full indicator
	 * when this event is first added.
	 */
	aux->empty_mark = sfb->num_sdb - 1;

	debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs"
			    " and %lu SDBs\n",
			    sfb->num_sdbt, sfb->num_sdb);

	return aux;

no_sdbt:
	/* SDBs (AUX buffer pages) are freed by caller */
	for (i = 0; i < sfb->num_sdbt; i++)
		free_page(aux->sdbt_index[i]);
	kfree(aux->sdb_index);
no_sdb_index:
	kfree(aux->sdbt_index);
no_sdbt_index:
	kfree(aux);
no_aux:
	return NULL;
}

static void cpumsf_pmu_read(struct perf_event *event)
{
	/* Nothing to do ... updates are interrupt-driven */
}

/* Check if the new sampling period/frequency is appropriate.
 *
 * Return non-zero on error and zero on passed checks.
 */
static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
{
	struct hws_qsi_info_block si;
	unsigned long rate;
	bool do_freq;

	memset(&si, 0, sizeof(si));
	if (event->cpu == -1) {
		if (qsi(&si))
			return -ENODEV;
	} else {
		/* Event is pinned to a particular CPU, retrieve the per-CPU
		 * sampling structure for accessing the CPU-specific QSI.
		 */
		struct cpu_hw_sf *cpuhw = &per_cpu(cpu_hw_sf, event->cpu);

		si = cpuhw->qsi;
	}

	do_freq = !!SAMPLE_FREQ_MODE(&event->hw);
	rate = getrate(do_freq, value, &si);
	if (!rate)
		return -EINVAL;

	event->attr.sample_period = rate;
	SAMPL_RATE(&event->hw) = rate;
	hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
	debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
			    "cpu:%d value:%llx period:%llx freq:%d\n",
			    event->cpu, value,
			    event->attr.sample_period, do_freq);
	return 0;
}

/* Activate sampling control.
 * Next call of pmu_enable() starts sampling.
 */
static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	perf_pmu_disable(event->pmu);
	event->hw.state = 0;
	cpuhw->lsctl.cs = 1;
	if (SAMPL_DIAG_MODE(&event->hw))
		cpuhw->lsctl.cd = 1;
	perf_pmu_enable(event->pmu);
}

/* Deactivate sampling control.
 * Next call of pmu_enable() stops sampling.
 */
1826 */ 1827 static void cpumsf_pmu_stop(struct perf_event *event, int flags) 1828 { 1829 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1830 1831 if (event->hw.state & PERF_HES_STOPPED) 1832 return; 1833 1834 perf_pmu_disable(event->pmu); 1835 cpuhw->lsctl.cs = 0; 1836 cpuhw->lsctl.cd = 0; 1837 event->hw.state |= PERF_HES_STOPPED; 1838 1839 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { 1840 hw_perf_event_update(event, 1); 1841 event->hw.state |= PERF_HES_UPTODATE; 1842 } 1843 perf_pmu_enable(event->pmu); 1844 } 1845 1846 static int cpumsf_pmu_add(struct perf_event *event, int flags) 1847 { 1848 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1849 struct aux_buffer *aux; 1850 int err; 1851 1852 if (cpuhw->flags & PMU_F_IN_USE) 1853 return -EAGAIN; 1854 1855 if (!SAMPL_DIAG_MODE(&event->hw) && !cpuhw->sfb.sdbt) 1856 return -EINVAL; 1857 1858 err = 0; 1859 perf_pmu_disable(event->pmu); 1860 1861 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; 1862 1863 /* Set up sampling controls. Always program the sampling register 1864 * using the SDB-table start. Reset TEAR_REG event hardware register 1865 * that is used by hw_perf_event_update() to store the sampling buffer 1866 * position after samples have been flushed. 1867 */ 1868 cpuhw->lsctl.s = 0; 1869 cpuhw->lsctl.h = 1; 1870 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw); 1871 if (!SAMPL_DIAG_MODE(&event->hw)) { 1872 cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; 1873 cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; 1874 hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); 1875 } 1876 1877 /* Ensure sampling functions are in the disabled state. If disabled, 1878 * switch on sampling enable control. */ 1879 if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) { 1880 err = -EAGAIN; 1881 goto out; 1882 } 1883 if (SAMPL_DIAG_MODE(&event->hw)) { 1884 aux = perf_aux_output_begin(&cpuhw->handle, event); 1885 if (!aux) { 1886 err = -EINVAL; 1887 goto out; 1888 } 1889 err = aux_output_begin(&cpuhw->handle, aux, cpuhw); 1890 if (err) 1891 goto out; 1892 cpuhw->lsctl.ed = 1; 1893 } 1894 cpuhw->lsctl.es = 1; 1895 1896 /* Set in_use flag and store event */ 1897 cpuhw->event = event; 1898 cpuhw->flags |= PMU_F_IN_USE; 1899 1900 if (flags & PERF_EF_START) 1901 cpumsf_pmu_start(event, PERF_EF_RELOAD); 1902 out: 1903 perf_event_update_userpage(event); 1904 perf_pmu_enable(event->pmu); 1905 return err; 1906 } 1907 1908 static void cpumsf_pmu_del(struct perf_event *event, int flags) 1909 { 1910 struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf); 1911 1912 perf_pmu_disable(event->pmu); 1913 cpumsf_pmu_stop(event, PERF_EF_UPDATE); 1914 1915 cpuhw->lsctl.es = 0; 1916 cpuhw->lsctl.ed = 0; 1917 cpuhw->flags &= ~PMU_F_IN_USE; 1918 cpuhw->event = NULL; 1919 1920 if (SAMPL_DIAG_MODE(&event->hw)) 1921 aux_output_end(&cpuhw->handle); 1922 perf_event_update_userpage(event); 1923 perf_pmu_enable(event->pmu); 1924 } 1925 1926 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); 1927 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); 1928 1929 /* Attribute list for CPU_SF. 1930 * 1931 * The availablitiy depends on the CPU_MF sampling facility authorization 1932 * for basic + diagnositic samples. This is determined at initialization 1933 * time by the sampling facility device driver. 1934 * If the authorization for basic samples is turned off, it should be 1935 * also turned off for diagnostic sampling. 
1936 * 1937 * During initialization of the device driver, check the authorization 1938 * level for diagnostic sampling and installs the attribute 1939 * file for diagnostic sampling if necessary. 1940 * 1941 * For now install a placeholder to reference all possible attributes: 1942 * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG. 1943 * Add another entry for the final NULL pointer. 1944 */ 1945 enum { 1946 SF_CYCLES_BASIC_ATTR_IDX = 0, 1947 SF_CYCLES_BASIC_DIAG_ATTR_IDX, 1948 SF_CYCLES_ATTR_MAX 1949 }; 1950 1951 static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = { 1952 [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC) 1953 }; 1954 1955 PMU_FORMAT_ATTR(event, "config:0-63"); 1956 1957 static struct attribute *cpumsf_pmu_format_attr[] = { 1958 &format_attr_event.attr, 1959 NULL, 1960 }; 1961 1962 static struct attribute_group cpumsf_pmu_events_group = { 1963 .name = "events", 1964 .attrs = cpumsf_pmu_events_attr, 1965 }; 1966 1967 static struct attribute_group cpumsf_pmu_format_group = { 1968 .name = "format", 1969 .attrs = cpumsf_pmu_format_attr, 1970 }; 1971 1972 static const struct attribute_group *cpumsf_pmu_attr_groups[] = { 1973 &cpumsf_pmu_events_group, 1974 &cpumsf_pmu_format_group, 1975 NULL, 1976 }; 1977 1978 static struct pmu cpumf_sampling = { 1979 .pmu_enable = cpumsf_pmu_enable, 1980 .pmu_disable = cpumsf_pmu_disable, 1981 1982 .event_init = cpumsf_pmu_event_init, 1983 .add = cpumsf_pmu_add, 1984 .del = cpumsf_pmu_del, 1985 1986 .start = cpumsf_pmu_start, 1987 .stop = cpumsf_pmu_stop, 1988 .read = cpumsf_pmu_read, 1989 1990 .attr_groups = cpumsf_pmu_attr_groups, 1991 1992 .setup_aux = aux_buffer_setup, 1993 .free_aux = aux_buffer_free, 1994 1995 .check_period = cpumsf_pmu_check_period, 1996 }; 1997 1998 static void cpumf_measurement_alert(struct ext_code ext_code, 1999 unsigned int alert, unsigned long unused) 2000 { 2001 struct cpu_hw_sf *cpuhw; 2002 2003 if (!(alert & CPU_MF_INT_SF_MASK)) 2004 return; 2005 inc_irq_stat(IRQEXT_CMS); 2006 cpuhw = this_cpu_ptr(&cpu_hw_sf); 2007 2008 /* Measurement alerts are shared and might happen when the PMU 2009 * is not reserved. Ignore these alerts in this case. */ 2010 if (!(cpuhw->flags & PMU_F_RESERVED)) 2011 return; 2012 2013 /* The processing below must take care of multiple alert events that 2014 * might be indicated concurrently. 
	 */

	/* Program alert request */
	if (alert & CPU_MF_INT_SF_PRA) {
		if (cpuhw->flags & PMU_F_IN_USE) {
			if (SAMPL_DIAG_MODE(&cpuhw->event->hw))
				hw_collect_aux(cpuhw);
			else
				hw_perf_event_update(cpuhw->event, 0);
		} else {
			WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
		}
	}

	/* Report measurement alerts only for non-PRA codes */
	if (alert != CPU_MF_INT_SF_PRA)
		debug_sprintf_event(sfdbg, 6, "measurement alert: %#x\n",
				    alert);

	/* Sampling authorization change request */
	if (alert & CPU_MF_INT_SF_SACA)
		qsi(&cpuhw->qsi);

	/* Loss of sample data due to high-priority machine activities */
	if (alert & CPU_MF_INT_SF_LSDA) {
		pr_err("Sample data was lost\n");
		cpuhw->flags |= PMU_F_ERR_LSDA;
		sf_disable();
	}

	/* Invalid sampling buffer entry */
	if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
		pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
		       alert);
		cpuhw->flags |= PMU_F_ERR_IBE;
		sf_disable();
	}
}

static int cpusf_pmu_setup(unsigned int cpu, int flags)
{
	/* Ignore the notification if no events are scheduled on the PMU.
	 * This might be racy...
	 */
	if (!atomic_read(&num_events))
		return 0;

	local_irq_disable();
	setup_pmc_cpu(&flags);
	local_irq_enable();
	return 0;
}

static int s390_pmu_sf_online_cpu(unsigned int cpu)
{
	return cpusf_pmu_setup(cpu, PMC_INIT);
}

static int s390_pmu_sf_offline_cpu(unsigned int cpu)
{
	return cpusf_pmu_setup(cpu, PMC_RELEASE);
}

static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
{
	if (!cpum_sf_avail())
		return -ENODEV;
	return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
}

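/* Illustrative usage of the "cpum_sfb_size" kernel parameter (values are
 * examples only): booting with "cpum_sfb_size=64,1024" limits the basic
 * sampling buffer to 64..1024 SDBs, while a single value such as
 * "cpum_sfb_size=1024" replaces only the maximum. Invalid combinations
 * are rejected by the checks below.
 */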
static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
{
	int rc;
	unsigned long min, max;

	if (!cpum_sf_avail())
		return -ENODEV;
	if (!val || !strlen(val))
		return -EINVAL;

	/* Valid parameter values: "min,max" or "max" */
	min = CPUM_SF_MIN_SDB;
	max = CPUM_SF_MAX_SDB;
	if (strchr(val, ','))
		rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
	else
		rc = kstrtoul(val, 10, &max);

	if (min < 2 || min >= max || max > get_num_physpages())
		rc = -EINVAL;
	if (rc)
		return rc;

	sfb_set_limits(min, max);
	pr_info("The sampling buffer limits have changed to: "
		"min=%lu max=%lu (diag=x%lu)\n",
		CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
	return 0;
}

#define param_check_sfb_size(name, p) __param_check(name, p, void)
static const struct kernel_param_ops param_ops_sfb_size = {
	.set = param_set_sfb_size,
	.get = param_get_sfb_size,
};

#define RS_INIT_FAILURE_QSI	0x0001
#define RS_INIT_FAILURE_BSDES	0x0002
#define RS_INIT_FAILURE_ALRT	0x0003
#define RS_INIT_FAILURE_PERF	0x0004
static void __init pr_cpumsf_err(unsigned int reason)
{
	pr_err("Sampling facility support for perf is not available: "
	       "reason=%04x\n", reason);
}

static int __init init_cpum_sampling_pmu(void)
{
	struct hws_qsi_info_block si;
	int err;

	if (!cpum_sf_avail())
		return -ENODEV;

	memset(&si, 0, sizeof(si));
	if (qsi(&si)) {
		pr_cpumsf_err(RS_INIT_FAILURE_QSI);
		return -ENODEV;
	}

	if (!si.as && !si.ad)
		return -ENODEV;

	if (si.bsdes != sizeof(struct hws_basic_entry)) {
		pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
		return -EINVAL;
	}

	if (si.ad) {
		sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
		/* Sampling of diagnostic data authorized,
		 * install event into attribute list of PMU device.
		 */
		cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
	}

	sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
	if (!sfdbg) {
		pr_err("Registering for s390dbf failed\n");
		return -ENOMEM;
	}
	debug_register_view(sfdbg, &debug_sprintf_view);

	err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
				    cpumf_measurement_alert);
	if (err) {
		pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
		debug_unregister(sfdbg);
		goto out;
	}

	err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
	if (err) {
		pr_cpumsf_err(RS_INIT_FAILURE_PERF);
		unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
					cpumf_measurement_alert);
		debug_unregister(sfdbg);
		goto out;
	}

	cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "perf/s390/sf:online",
			  s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
	return err;
}

arch_initcall(init_cpum_sampling_pmu);
core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
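/*
 * Usage sketch (illustrative only, not part of the driver): once this PMU
 * is registered, sampling can be requested with the perf tool, e.g.
 *
 *   perf record -e cpum_sf/SF_CYCLES_BASIC/ -a -- sleep 1
 *
 * Diagnostic-mode sampling (SF_CYCLES_BASIC_DIAG) additionally uses the
 * AUX buffer set up by aux_buffer_setup() above.
 */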