/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int other_bit(const u64 config)
{
	unsigned int val;

	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
		break;
	case I915_PMU_REQUESTED_FREQUENCY:
		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
		break;
	case I915_PMU_RC6_RESIDENCY:
		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
		break;
	default:
		/*
		 * Events that do not require sampling, or tracking state
		 * transitions between enabled and disabled can be ignored.
		 */
		return -1;
	}

	return I915_ENGINE_SAMPLE_COUNT + val;
}

static unsigned int config_bit(const u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return other_bit(config);
}

static u64 config_mask(u64 config)
{
	return BIT_ULL(config_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_bit(struct perf_event *event)
{
	return config_bit(event->attr.config);
}

static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u32 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
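
/*
 * Read the raw RC6 residency for the GT, adding the deeper RC6p/RC6pp
 * counters where the platform exposes them (Valleyview uses its own
 * render RC6 register).
 */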
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}

static inline s64 ktime_since_raw(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}

static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since_raw(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void init_rc6(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
					pmu->sample[__I915_SAMPLE_RC6].cur;
		pmu->sleep_last = ktime_get_raw();
	}
}

static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
	pmu->sleep_last = ktime_get_raw();
}

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return IS_GEN(i915, 7);
}
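
/*
 * Sample one engine's busy/wait/semaphore state from its mmio registers
 * and credit the elapsed period to the corresponding software counters.
 */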
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}

static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_mask(I915_PMU_REQUESTED_FREQUENCY));
}

static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(rps, rps->cur_freq),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}
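
/*
 * Sampling timer callback: accumulate engine and frequency samples for
 * the elapsed period, then re-arm while any sampling event stays enabled.
 */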
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}
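
/*
 * perf ->event_init() hook: reject unsupported modes (sampling, branch
 * stacks, per-task events) and validate the event config encoding.
 */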
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = READ_ONCE(pmu->irq_count);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
			val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
			break;
		}
	}

	return val;
}
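
/*
 * perf ->read() hook: publish the delta since the previously reported
 * value, using a cmpxchg loop so concurrent readers do not double count.
 */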
static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	unsigned int bit;

	bit = event_bit(event);
	if (bit == -1)
		goto update;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

update:
	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		return;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}
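
/*
 * perf ->start/->stop/->add/->del hooks map onto enabling and disabling
 * the software counters tracked above.
 */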
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}
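
/*
 * Helpers which initialise one sysfs attribute and return the next free
 * slot, so create_event_attributes() can walk preallocated arrays.
 */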
static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
		__event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:;
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}
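
/* Free the attribute names and arrays built by create_event_attributes(). */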
static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}
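
/*
 * A single dynamic cpuhp state is shared by all i915 PMU instances so the
 * designated reader CPU can be migrated when it goes offline.
 */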
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

void i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}
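
/*
 * Once registered, the counters are consumed through the perf uAPI under
 * the "i915" (or "i915_<device>") PMU name, e.g. with the perf tool
 * (illustrative invocation, assuming a perf build that reads the sysfs
 * event aliases created above):
 *
 *   perf stat -a -e i915/rc6-residency/ -e i915/actual-frequency/ sleep 1
 */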
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (INTEL_GEN(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;
	init_rc6(pmu);

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module = THIS_MODULE;
	pmu->base.task_ctx_nr = perf_invalid_context;
	pmu->base.event_init = i915_pmu_event_init;
	pmu->base.add = i915_pmu_event_add;
	pmu->base.del = i915_pmu_event_del;
	pmu->base.start = i915_pmu_event_start;
	pmu->base.stop = i915_pmu_event_stop;
	pmu->base.read = i915_pmu_event_read;
	pmu->base.event_idx = i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic, synchronize_rcu()
	 * ensures all currently executing ones will have exited before we
	 * proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}