/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

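/*
 * Layout of event->attr.config, as decoded by the helpers above: engine
 * events keep the sample type (busy/wait/sema) in the low
 * I915_PMU_SAMPLE_BITS, with the engine instance and class in the bytes
 * above that, while non-engine events are numbered upwards from
 * __I915_PMU_OTHER(0).
 *
 * Illustrative userspace sketch only (not part of the driver; it uses the
 * uapi helpers from i915_drm.h and assumes the usual perf_event_open() flow):
 *
 *	attr.type = <value of /sys/bus/event_source/devices/i915/type>;
 *	attr.config = I915_PMU_ENGINE_BUSY(0, 0);    // class 0/instance 0, i.e. rcs0
 *	fd = perf_event_open(&attr, -1, cpu, -1, 0); // cpu taken from the exported cpumask
 */
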
static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, if software busyness tracking is available we do not need
	 * the timer for the I915_SAMPLE_BUSY counter.
	 *
	 * Use RCS as a proxy for all engines.
	 */
	else if (intel_engine_supports_stats(i915->engine[RCS0]))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled
	 * and GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}

static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		i915->pmu.timer_last = ktime_get();
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static void
engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned long flags;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	wakeref = 0;
	if (READ_ONCE(dev_priv->gt.awake))
		wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (!wakeref)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	for_each_engine(engine, dev_priv, id) {
		struct intel_engine_pmu *pmu = &engine->pmu;
		bool busy;
		u32 val;

		val = I915_READ_FW(RING_CTL(engine->mmio_base));
		if (val == 0) /* powerwell off => engine idle */
			continue;

		if (val & RING_WAIT)
			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
		if (val & RING_WAIT_SEMAPHORE)
			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

		/*
		 * While waiting on a semaphore or event, MI_MODE reports the
		 * ring as idle. However, previously using the seqno, and with
		 * execlists sampling, we account for the ring waiting as the
		 * engine being busy. Therefore, we record the sample as being
		 * busy if either waiting or !idle.
		 */
		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
		if (!busy) {
			val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
			busy = !(val & MODE_IDLE);
		}
		if (busy)
			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

	intel_runtime_pm_put(dev_priv, wakeref);
}

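/*
 * Note on the sampling scheme above: every timer tick adds the elapsed
 * period_ns to a counter whenever the sampled condition (busy, wait, sema)
 * is observed in the ring registers, so the reported values are statistical
 * estimates, e.g. busy time ~= sum of period_ns over the ticks where the
 * ring was not idle. Engines with software stats support skip this path for
 * the busy counter and use intel_engine_get_busy_time() in the read path
 * instead.
 */
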
static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static void
frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake) {
			intel_wakeref_t wakeref;

			with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
				val = intel_get_cagf(dev_priv,
						     I915_READ_NOTRACE(GEN6_RPSTAT1));
		}

		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(dev_priv, val),
				period_ns / 1000);
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(dev_priv,
					       dev_priv->gt_pm.rps.cur_freq),
				period_ns / 1000);
	}
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
	i915->pmu.timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculations, since some amount of time can be spent
	 * on grabbing the forcewake. However the potential error from timer
	 * callback delay greatly dominates this so we keep it simple.
	 */
	engines_sample(i915, period_ns);
	frequency_sample(i915, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

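/*
 * Unit bookkeeping for the frequency counters: the timer accumulates
 * MHz * usec (intel_gpu_freq() returns MHz and period_ns / 1000 is the tick
 * length in microseconds), and the read path divides by USEC_PER_SEC, so
 * perf sees MHz * seconds. Dividing the reported delta by the elapsed time,
 * as the perf tool does, therefore yields the time-averaged frequency in MHz.
 */
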
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

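/*
 * Return-code convention used above: -ENOENT means the config is not a
 * counter this PMU knows about at all, while -ENODEV means the counter is
 * known but unsupported on this GPU (e.g. the frequency counters before
 * gen6, RC6 residency without RC6, or semaphore sampling before gen6).
 * create_event_attributes() relies on this distinction to decide which
 * events to expose in sysfs.
 */
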
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

static u64 __get_rc6(struct drm_i915_private *i915)
{
	u64 val;

	val = intel_rc6_residency_ns(i915,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);

	return val;
}

static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
	intel_wakeref_t wakeref;
	unsigned long flags;
	u64 val;

	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (wakeref) {
		val = __get_rc6(i915);
		intel_runtime_pm_put(i915, wakeref);

		/*
		 * If we are coming back from being runtime suspended we must
		 * be careful not to report a larger value than returned
		 * previously.
		 */

		spin_lock_irqsave(&i915->pmu.lock, flags);

		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
		} else {
			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		}

		spin_unlock_irqrestore(&i915->pmu.lock, flags);
	} else {
		struct pci_dev *pdev = i915->drm.pdev;
		struct device *kdev = &pdev->dev;

		/*
		 * We are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		spin_lock_irqsave(&i915->pmu.lock, flags);

		/*
		 * Since intel_runtime_pm_get_if_in_use failed above to get the
		 * runtime PM reference, we cannot assume we are in runtime
		 * suspend: we could either a) have raced with coming out of it
		 * before we took the power.lock, or b) be in one of the other
		 * states which can bring us here.
		 *
		 * We need to double-check that we are indeed currently runtime
		 * suspended and if not we cannot do better than report the
		 * last known RC6 value.
		 */
		if (pm_runtime_status_suspended(kdev)) {
			val = pm_runtime_suspended_time(kdev);

			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
				i915->pmu.suspended_time_last = val;

			val -= i915->pmu.suspended_time_last;
			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;

			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
		} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
		} else {
			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
		}

		spin_unlock_irqrestore(&i915->pmu.lock, flags);
	}

	return val;
#else
	return __get_rc6(i915);
#endif
}

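/*
 * Summary of the RC6 approximation above: while the device is runtime
 * suspended the residency registers cannot be read, so the counter is
 * estimated as
 *
 *	last real RC6 value +
 *	(suspended time now - suspended time at the first estimate)
 *
 * and a real readout is only reported again once it has caught up with the
 * last estimate, so the counter never appears to go backwards.
 */
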
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(i915);
			break;
		}
	}

	return val;
}

static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners, even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}

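/*
 * A note on i915_pmu_event_read() above: the counters are free-running, so
 * the read path publishes a new snapshot via the local64_cmpxchg() loop and
 * adds only the delta since the previous snapshot to event->count, which
 * keeps concurrent readers from double-counting the same interval.
 */
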
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

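/*
 * The perf callbacks below are intentionally thin: i915 counters are
 * free-running rather than scheduled onto hardware counter slots, so
 * add/del and start/stop simply map onto the reference-counted enable and
 * disable paths above, and event_idx can always return 0.
 */
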
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};

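/*
 * Once the PMU is registered, the attribute groups above and below appear
 * under /sys/bus/event_source/devices/i915/: "format" describes the config
 * field layout, "events" maps human-readable names to config values and
 * units, and "cpumask" exports the designated reader CPU. As a hedged usage
 * sketch, the perf tool can then resolve names such as "i915/rcs0-busy/" or
 * "i915/actual-frequency/" without knowing the raw config encoding.
 */
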
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

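/*
 * create_event_attributes() below builds the "events" directory contents at
 * runtime because the set of exposed counters depends on the GPU: global
 * events are filtered through config_status() and per-engine events through
 * engine_event_status(). For each supported counter it creates a name
 * attribute (e.g. "rcs0-busy" or "actual-frequency") and, where applicable,
 * a matching ".unit" attribute ("ns", "MHz").
 */
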
static struct attribute **
create_event_attributes(struct drm_i915_private *i915)
{
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	i915->pmu.i915_attr = i915_attr;
	i915->pmu.pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct drm_i915_private *i915)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(i915->pmu.i915_attr);
	kfree(i915->pmu.pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	i915->pmu.i915_attr = NULL;
	i915->pmu.pmu_attr = NULL;
}

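/*
 * CPU hotplug handling: the i915 PMU behaves like an uncore PMU, so events
 * are counted on a single designated reader CPU which is exported through
 * the "cpumask" attribute. The callbacks below pick the first CPU to come
 * online and, should that CPU later go offline, migrate the perf context to
 * another CPU and update the mask accordingly.
 */
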
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
}

void i915_pmu_register(struct drm_i915_private *i915)
{
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.\n");
		return;
	}

	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
	if (!i915_pmu_events_attr_group.attrs) {
		ret = -ENOMEM;
		goto err;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
}