/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

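/*
 * Layout of the pmu.enable bitmask, as implied by the helpers above: the
 * low I915_PMU_SAMPLE_BITS bits track the engine sample types (busy, wait,
 * sema) shared by all engines, while "other" events such as the frequency
 * and RC6 counters occupy one bit each above ENGINE_SAMPLE_BITS, indexed
 * by their offset from __I915_PMU_OTHER(0).
 */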
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, if software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 *
	 * Use RCS as proxy for all engines.
	 */
	else if (intel_engine_supports_stats(i915->engine[RCS]))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}

static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}

static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	sample->cur += mul_u32_u32(val, unit);
}

static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}

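/*
 * Frequency sampling reads the current actual frequency (CAGF) from the
 * hardware only when the GT is awake and the runtime PM reference can be
 * taken without waking the device; otherwise it falls back to the last
 * requested frequency, on the assumption that it is a close enough
 * approximation while the GPU is asleep.
 */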
static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}

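/*
 * These are uncore (device-wide) events, so event_init below rejects
 * sampling mode, branch stacks and per-task (cpu < 0) usage, and only
 * accepts events on the single designated reader CPU published via the
 * "cpumask" sysfs attribute further down.
 */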
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event)) {
		ret = engine_event_init(event);
	} else {
		ret = 0;
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
				/* Requires a mutex for sampling! */
				ret = -ENODEV;
			/* Fall-through. */
		case I915_PMU_REQUESTED_FREQUENCY:
			if (INTEL_GEN(i915) < 6)
				ret = -ENODEV;
			break;
		case I915_PMU_INTERRUPTS:
			break;
		case I915_PMU_RC6_RESIDENCY:
			if (!HAS_RC6(i915))
				ret = -ENODEV;
			break;
		default:
			ret = -ENOENT;
			break;
		}
	}
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

static u64 __get_rc6(struct drm_i915_private *i915)
{
	u64 val;

	val = intel_rc6_residency_ns(i915,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

	return val;
}

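/*
 * RC6 residency can only be read from hardware while the device is awake.
 * While it is runtime suspended, get_rc6() below instead extrapolates from
 * the runtime PM suspended-time accounting, and clamps the result so that
 * the value reported to userspace never goes backwards across a
 * suspend/resume cycle.
 */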
static u64 get_rc6(struct drm_i915_private *i915, bool locked)
{
#if IS_ENABLED(CONFIG_PM)
	unsigned long flags;
	u64 val;

	if (intel_runtime_pm_get_if_in_use(i915)) {
		val = __get_rc6(i915);
		intel_runtime_pm_put(i915);

		/*
		 * If we are coming back from being runtime suspended we must
		 * be careful not to report a larger value than returned
		 * previously.
		 */

		if (!locked)
			spin_lock_irqsave(&i915->pmu.lock, flags);

		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
		} else {
			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		}

		if (!locked)
			spin_unlock_irqrestore(&i915->pmu.lock, flags);
	} else {
		struct pci_dev *pdev = i915->drm.pdev;
		struct device *kdev = &pdev->dev;
		unsigned long flags2;

		/*
		 * We are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		if (!locked)
			spin_lock_irqsave(&i915->pmu.lock, flags);

		spin_lock_irqsave(&kdev->power.lock, flags2);

		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
			i915->pmu.suspended_jiffies_last =
						kdev->power.suspended_jiffies;

		val = kdev->power.suspended_jiffies -
		      i915->pmu.suspended_jiffies_last;
		val += jiffies - kdev->power.accounting_timestamp;

		spin_unlock_irqrestore(&kdev->power.lock, flags2);

		val = jiffies_to_nsecs(val);
		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;

		if (!locked)
			spin_unlock_irqrestore(&i915->pmu.lock, flags);
	}

	return val;
#else
	return __get_rc6(i915);
#endif
}

static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(i915, locked);
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event, false);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

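/*
 * Enabling and disabling is reference counted: pmu.enable is the union of
 * all enabled events, with per-bit listener counts in pmu.enable_count
 * (and per-engine equivalents in engine->pmu), all serialized by pmu.lock
 * so the sampling timer always sees a consistent view.
 */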
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		engine->pmu.enable_count[sample]++;
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

#define I915_EVENT_ATTR(_name, _config) \
	(&((struct i915_ext_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
		  .val = _config, } \
	})[0].attr.attr)

#define I915_EVENT_STR(_name, _str) \
	(&((struct perf_pmu_events_attr[]) { \
		{ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
		  .id = 0, \
		  .event_str = _str, } \
	})[0].attr.attr)

#define I915_EVENT(_name, _config, _unit) \
	I915_EVENT_ATTR(_name, _config), \
	I915_EVENT_STR(_name.unit, _unit)

#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
	I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
	I915_EVENT_STR(_name.unit, "ns")

#define I915_ENGINE_EVENTS(_name, _class, _instance) \
	I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
	I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
	I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)

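/*
 * The macros above expand into the sysfs "events" directory, e.g.
 * I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0) publishes
 * rcs0-busy, rcs0-sema and rcs0-wait, each with a "ns" unit file.
 * From userspace these can then be read with something like:
 *
 *	perf stat -e i915/rcs0-busy/ -a sleep 1
 */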
static struct attribute *i915_pmu_events_attrs[] = {
	I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
	I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
	I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
	I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),

	I915_EVENT(actual-frequency, I915_PMU_ACTUAL_FREQUENCY, "MHz"),
	I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),

	I915_EVENT_ATTR(interrupts, I915_PMU_INTERRUPTS),

	I915_EVENT(rc6-residency, I915_PMU_RC6_RESIDENCY, "ns"),

	NULL,
};

static const struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	.attrs = i915_pmu_events_attrs,
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
}

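/*
 * Registration note: pmu.base.event_init doubles as the "PMU registered"
 * flag. It is only left non-NULL on successful registration, and is
 * checked by the parking/unparking hooks and by i915_pmu_unregister()
 * before touching any PMU state.
 */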
void i915_pmu_register(struct drm_i915_private *i915)
{
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.");
		return;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
}