// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/i915_drm.h>

#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_gt_regs.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rps.h"
#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"

/* Upper bound on the software evaluation-interval back-off, see rps_timer() */
#define BUSY_MAX_EI 20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

/* Map an RPS state back to its containing GT. */
static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
	return container_of(rps, struct intel_gt, rps);
}

/* Convenience: the i915 device owning this RPS state. */
static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
	return rps_to_gt(rps)->i915;
}

/* Convenience: the uncore (MMIO accessor) for this RPS state. */
static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
	return rps_to_gt(rps)->uncore;
}

/* The GuC SLPC state co-located with this RPS state. */
static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return &gt->uc.guc.slpc;
}

/* True when frequency management is delegated to GuC SLPC. */
static bool rps_uses_slpc(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	return intel_uc_uses_guc_slpc(&gt->uc);
}

/* Clear the bits that must be zero in PMINTRMSK on this platform. */
static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
	return mask & ~rps->pm_intrmsk_mbz;
}

/* Raw register write, bypassing forcewake bookkeeping. */
static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
	intel_uncore_write_fw(uncore, reg, val);
}

/*
 * Software RPS evaluation timer: sample per-engine busyness since the
 * previous tick and, when the up/down thresholds are crossed, queue
 * rps->work to adjust the GPU frequency.
 */
static void rps_timer(struct timer_list *t)
{
	struct intel_rps *rps = from_timer(rps, t, timer);
	struct intel_engine_cs *engine;
	ktime_t dt, last, timestamp;
	enum intel_engine_id id;
	s64 max_busy[3] = {};

	timestamp = 0;
	for_each_engine(engine, rps_to_gt(rps), id) {
		s64 busy;
		int i;

		dt = intel_engine_get_busy_time(engine, &timestamp);
		last = engine->stats.rps;
		engine->stats.rps = dt;

		/* Keep the three busiest engines, sorted descending */
		busy = ktime_to_ns(ktime_sub(dt, last));
		for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
			if (busy > max_busy[i])
				swap(busy, max_busy[i]);
		}
	}
	last = rps->pm_timestamp;
	rps->pm_timestamp = timestamp;

	if (intel_rps_is_active(rps)) {
		s64 busy;
		int i;

		dt = ktime_sub(timestamp, last);

		/*
		 * Our goal is to evaluate each engine independently, so we run
		 * at the lowest clocks required to sustain the heaviest
		 * workload. However, a task may be split into sequential
		 * dependent operations across a set of engines, such that
		 * the independent contributions do not account for high load,
		 * but overall the task is GPU bound. For example, consider
		 * video decode on vcs followed by colour post-processing
		 * on vecs, followed by general post-processing on rcs.
		 * Since multi-engines being active does imply a single
		 * continuous workload across all engines, we hedge our
		 * bets by only contributing a factor of the distributed
		 * load into our busyness calculation.
		 */
		busy = max_busy[0];
		for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
			if (!max_busy[i])
				break;

			busy += div_u64(max_busy[i], 1 << i);
		}
		GT_TRACE(rps_to_gt(rps),
			 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
			 busy, (int)div64_u64(100 * busy, dt),
			 max_busy[0], max_busy[1], max_busy[2],
			 rps->pm_interval);

		if (100 * busy > rps->power.up_threshold * dt &&
		    rps->cur_freq < rps->max_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else if (100 * busy < rps->power.down_threshold * dt &&
			   rps->cur_freq > rps->min_freq_softlimit) {
			rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
			rps->pm_interval = 1;
			schedule_work(&rps->work);
		} else {
			rps->last_adj = 0;
		}

		/* Re-arm, backing off the interval exponentially to BUSY_MAX_EI */
		mod_timer(&rps->timer,
			  jiffies + msecs_to_jiffies(rps->pm_interval));
		rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
	}
}

/* Arm the evaluation timer; pm_timestamp becomes a running offset. */
static void rps_start_timer(struct intel_rps *rps)
{
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	rps->pm_interval = 1;
	mod_timer(&rps->timer, jiffies + 1);
}

/* Quiesce the evaluation timer and any queued frequency work. */
static void rps_stop_timer(struct intel_rps *rps)
{
	del_timer_sync(&rps->timer);
	rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
	cancel_work_sync(&rps->work);
}

/*
 * Build the PMINTRMSK value for frequency @val: unmask only the
 * up/down events that can still move us within the softlimits.
 */
static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
	u32 mask = 0;

	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
	if (val > rps->min_freq_softlimit)
		mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
			 GEN6_PM_RP_DOWN_THRESHOLD |
			 GEN6_PM_RP_DOWN_TIMEOUT);

	if (val < rps->max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

	mask &= rps->pm_events;

	return rps_pm_sanitize_mask(rps, ~mask);
}

/* Forget accumulated evaluation-interval state. */
static void rps_reset_ei(struct intel_rps *rps)
{
	memset(&rps->ei, 0, sizeof(rps->ei));
}

/* Unmask and enable the RPS interrupts we act on (non-SLPC platforms). */
static void rps_enable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	GEM_BUG_ON(rps_uses_slpc(rps));

	GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
		 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

	rps_reset_ei(rps);

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_enable_irq(gt, rps->pm_events);
	spin_unlock_irq(gt->irq_lock);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

/* Ack any stale RPS interrupt identity bits (gen6..gen10). */
static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
	gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

/* Ack stale GTPM interrupt identity bits (gen11+), looping until clear. */
static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
	while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
		;
}

/* Clear pending RPS interrupt state, in hardware and in rps->pm_iir. */
static void rps_reset_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	spin_lock_irq(gt->irq_lock);
	if (GRAPHICS_VER(gt->i915) >= 11)
		gen11_rps_reset_interrupts(rps);
	else
		gen6_rps_reset_interrupts(rps);

	rps->pm_iir = 0;
	spin_unlock_irq(gt->irq_lock);
}

/* Mask RPS interrupts and flush any in-flight handlers and queued work. */
static void rps_disable_interrupts(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	intel_uncore_write(gt->uncore,
			   GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(gt->irq_lock);

	intel_synchronize_irq(gt->i915);

	/*
	 * Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);

	rps_reset_interrupts(rps);
	GT_TRACE(gt, "interrupts:off\n");
}

/*
 * IPS chipset-power model coefficients, selected by FSB-speed bucket (i)
 * and memory frequency (t); see __ips_chipset_val() for their use.
 */
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

/* Read gen5 (ilk) frequency limits from MEMMODECTL and pick IPS coefficients. */
static void gen5_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fmax, fmin, fstart;
	u32 rgvmodectl;
	int c_m, i;

	if (i915->fsb_freq <= 3200)
		c_m = 0;
	else if (i915->fsb_freq <= 4800)
		c_m = 1;
	else
		c_m = 2;

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
			rps->ips.m = cparams[i].m;
			rps->ips.c = cparams[i].c;
			break;
		}
	}

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;
	drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
		fmax, fmin, fstart);

	/* fmax/fmin are swapped: gen5 bins are inverted (see gen5_invert_freq) */
	rps->min_freq = fmax;
	rps->efficient_freq = fstart;
	rps->max_freq = fmin;
}

/* Estimate chipset power from the DMI/DDR/CSI energy counters. */
static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	unsigned long now = jiffies_to_msecs(jiffies), dt;
	unsigned long result;
	u64 total, delta;

	lockdep_assert_held(&mchdev_lock);

	/*
	 * Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	dt = now - ips->last_time1;
	if (dt <= 10)
		return ips->chipset_power;

	/* FIXME: handle per-counter overflow */
	total = intel_uncore_read(uncore, DMIEC);
	total += intel_uncore_read(uncore, DDREC);
	total += intel_uncore_read(uncore, CSIEC);

	delta = total - ips->last_count1;

	result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

	ips->last_count1 = total;
	ips->last_time1 = now;

	ips->chipset_power = result;

	return result;
}

/* Read the thermal sensor, scaled by the slope/intercept in TSFS. */
static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
	unsigned int m, x, b;
	u32 tsfs;

	tsfs = intel_uncore_read(uncore, TSFS);
	x = intel_uncore_read8(uncore, TR1);

	b = tsfs & TSFS_INTR_MASK;
	m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

	return m * x / 127 - b;
}

/* Decode a PXVID fuse field into a voltage value (0 stays 0). */
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	/* Values 8..30 are clamped up to 31 before decoding */
	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}

/* Convert PXVID to an extended VID, with an offset applied on mobile parts. */
static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);

	if (INTEL_INFO(i915)->is_mobile)
		return max(vd - 1125, 0);

	return vd;
}

/* Update the gfx power estimate from the GFXEC energy counter. */
static void __gen5_ips_update(struct intel_ips *ips)
{
	struct intel_uncore *uncore =
		rps_to_uncore(container_of(ips, struct intel_rps, ips));
	u64 now, delta, dt;
	u32 count;

	lockdep_assert_held(&mchdev_lock);

	now = ktime_get_raw_ns();
	dt = now - ips->last_time2;
	do_div(dt, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (dt <= 10)
		return;

	count = intel_uncore_read(uncore, GFXEC);
	delta = count - ips->last_count2;

	ips->last_count2 = count;
	ips->last_time2 = now;

	/* More magic constants... */
	ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

/* Refresh the gen5 gfx power estimate under the IPS lock. */
static void gen5_rps_update(struct intel_rps *rps)
{
	spin_lock_irq(&mchdev_lock);
	__gen5_ips_update(&rps->ips);
	spin_unlock_irq(&mchdev_lock);
}

/*
 * Gen5 encodes frequency bins inverted; map a frequency bin into the
 * equivalent ips delay value (and vice versa — the mapping is symmetric).
 */
static unsigned int gen5_invert_freq(struct intel_rps *rps,
				     unsigned int val)
{
	/* Invert the frequency bin into an ips delay */
	val = rps->max_freq - val;
	val = rps->min_freq + val;

	return val;
}

/* Request a gen5 frequency change via MEMSWCTL; -EBUSY if hw still busy. */
static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	lockdep_assert_held(&mchdev_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		drm_dbg(&rps_to_i915(rps)->drm,
			"gpu busy, RCS change rejected\n");
		return -EBUSY; /* still busy with another command */
	}

	/* Invert the frequency bin into an ips delay */
	val = gen5_invert_freq(rps, val);

	rgvswctl =
		(MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) |
		MEMCTL_SFCAVM;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
	intel_uncore_posting_read16(uncore, MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

	return 0;
}

/* Locked wrapper around __gen5_rps_set(). */
static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	spin_lock_irq(&mchdev_lock);
	err = __gen5_rps_set(rps, val);
	spin_unlock_irq(&mchdev_lock);

	return err;
}

/* Decode a PXVFREQ fuse into a frequency (0 if the pre-divider is unset). */
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	return div * 133333 / (pre << post);
}

/* Program the energy-monitor (EMON) weights; returns the HIV fuse value. */
static unsigned int init_emon(struct intel_uncore *uncore)
{
	u8 pxw[16];
	int i;

	/* Disable to program */
	intel_uncore_write(uncore, ECR, 0);
	intel_uncore_posting_read(uncore, ECR);

	/* Program energy weights for various events */
	intel_uncore_write(uncore, SDEW, 0x15040d00);
	intel_uncore_write(uncore, CSIEW0, 0x007f0000);
	intel_uncore_write(uncore, CSIEW1, 0x1e220004);
	intel_uncore_write(uncore, CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		intel_uncore_write(uncore, PEW(i), 0);
	for (i = 0; i < 3; i++)
		intel_uncore_write(uncore, DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
		unsigned int freq = intel_pxfreq(pxvidfreq);
		unsigned int vid =
			(pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
		unsigned int val;

		val = vid * vid * freq / 1000 * 255;
		val /= 127 * 127 * 900;

		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	/* Pack the 16 byte-wide weights into four 32-bit PXW registers */
	for (i = 0; i < 4; i++) {
		intel_uncore_write(uncore, PXW(i),
				   pxw[i * 4 + 0] << 24 |
				   pxw[i * 4 + 1] << 16 |
				   pxw[i * 4 + 2] << 8 |
				   pxw[i * 4 + 3] << 0);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	intel_uncore_write(uncore, OGW0, 0);
	intel_uncore_write(uncore, OGW1, 0);
	intel_uncore_write(uncore, EG0, 0x00007f00);
	intel_uncore_write(uncore, EG1, 0x0000000e);
	intel_uncore_write(uncore, EG2, 0x000e0000);
	intel_uncore_write(uncore, EG3, 0x68000300);
	intel_uncore_write(uncore, EG4, 0x42000000);
	intel_uncore_write(uncore, EG5, 0x00140031);
	intel_uncore_write(uncore, EG6, 0);
	intel_uncore_write(uncore, EG7, 0);

	for (i = 0; i < 8; i++)
		intel_uncore_write(uncore, PXWL(i), 0);

	/* Enable PMON + select events */
	intel_uncore_write(uncore, ECR, 0x80000019);

	return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

/*
 * Bring up gen5 (ironlake) RPS: program evaluation intervals and
 * thresholds, switch MEMMODECTL to software frequency control, seed
 * the IPS energy counters and enable the PCU display interrupt.
 */
static bool gen5_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u8 fstart, vstart;
	u32 rgvmodectl;

	spin_lock_irq(&mchdev_lock);

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

	/* Enable temp reporting */
	intel_uncore_write16(uncore, PMMISC,
			     intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
	intel_uncore_write16(uncore, TSC1,
			     intel_uncore_read16(uncore, TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	intel_uncore_write(uncore, RCUPEI, 100000);
	intel_uncore_write(uncore, RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	intel_uncore_write(uncore, RCBMAXAVG, 90000);
	intel_uncore_write(uncore, RCBMINAVG, 80000);

	intel_uncore_write(uncore, MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
		  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

	intel_uncore_write(uncore,
			   MEMINTREN,
			   MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	intel_uncore_write(uncore, VIDSTART, vstart);
	intel_uncore_posting_read(uncore, VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
			     MEMCTL_CMD_STS) == 0, 10))
		drm_err(&uncore->i915->drm,
			"stuck trying to change perf mode\n");
	mdelay(1);

	__gen5_rps_set(rps, rps->cur_freq);

	/* Seed the IPS counters so the first delta is well-defined */
	rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
	rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
	rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
	rps->ips.last_time1 = jiffies_to_msecs(jiffies);

	rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
	rps->ips.last_time2 = ktime_get_raw_ns();

	spin_lock(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	spin_unlock_irq(&mchdev_lock);

	rps->ips.corr = init_emon(uncore);

	return true;
}

/* Tear down gen5 RPS: disable interrupts and return to the idle bin. */
static void gen5_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	spin_lock(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PCU_EVENT);
	spin_unlock(&i915->irq_lock);

	rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	intel_uncore_rmw(uncore, MEMINTREN, MEMINT_EVAL_CHG_EN, 0);
	intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

	/* Go back to the starting frequency */
	__gen5_rps_set(rps, rps->idle_freq);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}

/* Compute the GEN6_RP_INTERRUPT_LIMITS value for frequency @val. */
static u32 rps_limits(struct intel_rps *rps, u8 val)
{
	u32 limits;

	/*
	 * Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt.
	 */
	if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
		limits = rps->max_freq_softlimit << 23;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 14;
	} else {
		limits = rps->max_freq_softlimit << 24;
		if (val <= rps->min_freq_softlimit)
			limits |= rps->min_freq_softlimit << 16;
	}

	return limits;
}

/*
 * Switch the hardware RPS engine between the LOW_POWER / BETWEEN /
 * HIGH_POWER profiles by reprogramming the up/down evaluation
 * intervals and thresholds. Caller holds rps->power.mutex.
 */
static void rps_set_power(struct intel_rps *rps, int new_power)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	u32 threshold_up = 0, threshold_down = 0; /* in % */
	u32 ei_up = 0, ei_down = 0;

	lockdep_assert_held(&rps->power.mutex);

	if (new_power == rps->power.mode)
		return;

	threshold_up = 95;
	threshold_down = 85;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		ei_up = 16000;
		ei_down = 32000;
		break;

	case BETWEEN:
		ei_up = 13000;
		ei_down = 32000;
		break;

	case HIGH_POWER:
		ei_up = 10000;
		ei_down = 32000;
		break;
	}

	/* When byt can survive without system hang with dynamic
	 * sw freq adjustments, this restriction can be lifted.
	 */
	if (IS_VALLEYVIEW(gt->i915))
		goto skip_hw_write;

	GT_TRACE(gt,
		 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
		 new_power, threshold_up, ei_up, threshold_down, ei_down);

	set(uncore, GEN6_RP_UP_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
	set(uncore, GEN6_RP_UP_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

	set(uncore, GEN6_RP_DOWN_EI,
	    intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
	set(uncore, GEN6_RP_DOWN_THRESHOLD,
	    intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

	set(uncore, GEN6_RP_CONTROL,
	    (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
	    GEN6_RP_MEDIA_HW_NORMAL_MODE |
	    GEN6_RP_MEDIA_IS_GFX |
	    GEN6_RP_ENABLE |
	    GEN6_RP_UP_BUSY_AVG |
	    GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
	rps->power.mode = new_power;
	rps->power.up_threshold = threshold_up;
	rps->power.down_threshold = threshold_down;
}

/* Pick the power profile appropriate for frequency @val and apply it. */
static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
	int new_power;

	new_power = rps->power.mode;
	switch (rps->power.mode) {
	case LOW_POWER:
		if (val > rps->efficient_freq + 1 &&
		    val > rps->cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= rps->efficient_freq &&
		    val < rps->cur_freq)
			new_power = LOW_POWER;
		else if (val >= rps->rp0_freq &&
			 val > rps->cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
		    val < rps->cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val <= rps->min_freq_softlimit)
		new_power = LOW_POWER;
	if (val >= rps->max_freq_softlimit)
		new_power = HIGH_POWER;

	mutex_lock(&rps->power.mutex);
	if (rps->power.interactive)
		new_power = HIGH_POWER;
	rps_set_power(rps, new_power);
	mutex_unlock(&rps->power.mutex);
}

/*
 * Track an "interactive" client (refcounted); while any are registered
 * the HIGH_POWER profile is forced for lower ramp-up latency.
 */
void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
	GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n",
		 str_yes_no(interactive));

	mutex_lock(&rps->power.mutex);
	if (interactive) {
		if (!rps->power.interactive++ && intel_rps_is_active(rps))
			rps_set_power(rps, HIGH_POWER);
	} else {
		GEM_BUG_ON(!rps->power.interactive);
		rps->power.interactive--;
	}
	mutex_unlock(&rps->power.mutex);
}

/* Write the gen6+ software frequency request (GEN6_RPNSWREQ). */
static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 swreq;

	GEM_BUG_ON(rps_uses_slpc(rps));

	/* The request encoding differs per platform generation */
	if (GRAPHICS_VER(i915) >= 9)
		swreq = GEN9_FREQUENCY(val);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		swreq = HSW_FREQUENCY(val);
	else
		swreq = (GEN6_FREQUENCY(val) |
			 GEN6_OFFSET(0) |
			 GEN6_AGGRESSIVE_TURBO);
	set(uncore, GEN6_RPNSWREQ, swreq);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
		 val, intel_gpu_freq(rps, val), swreq);

	return 0;
}

/* Request a frequency through the vlv/chv punit sideband. */
static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	vlv_punit_get(i915);
	err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
	vlv_punit_put(i915);

	GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
		 val, intel_gpu_freq(rps, val));

	return err;
}

/*
 * Platform dispatch for a frequency change; when @update is set, also
 * retune the autonomous-RPS thresholds for the new frequency.
 */
static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int err;

	if (val == rps->last_freq)
		return 0;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		err = vlv_rps_set(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		err = gen6_rps_set(rps, val);
	else
		err = gen5_rps_set(rps, val);
	if (err)
		return err;

	if (update && GRAPHICS_VER(i915) >= 6)
		gen6_rps_set_thresholds(rps, val);
	rps->last_freq = val;

	return 0;
}

/* Reapply RPS state as the GT wakes from its parked (idle) state. */
void intel_rps_unpark(struct intel_rps *rps)
{
	if (!intel_rps_is_enabled(rps))
		return;

	GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

	/*
	 * Use the user's desired frequency as a guide, but for better
	 * performance, jump directly to RPe as our starting frequency.
	 */
	mutex_lock(&rps->lock);

	intel_rps_set_active(rps);
	intel_rps_set(rps,
		      clamp(rps->cur_freq,
			    rps->min_freq_softlimit,
			    rps->max_freq_softlimit));

	mutex_unlock(&rps->lock);

	rps->pm_iir = 0;
	if (intel_rps_has_interrupts(rps))
		rps_enable_interrupts(rps);
	if (intel_rps_uses_timer(rps))
		rps_start_timer(rps);

	if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
		gen5_rps_update(rps);
}

/* Drop to the idle frequency and quiesce RPS as the GT goes idle. */
void intel_rps_park(struct intel_rps *rps)
{
	int adj;

	if (!intel_rps_is_enabled(rps))
		return;

	if (!intel_rps_clear_active(rps))
		return;

	if (intel_rps_uses_timer(rps))
		rps_stop_timer(rps);
	if (intel_rps_has_interrupts(rps))
		rps_disable_interrupts(rps);

	if (rps->last_freq <= rps->idle_freq)
		return;

	/*
	 * The punit delays the write of the frequency and voltage until it
	 * determines the GPU is awake. During normal usage we don't want to
	 * waste power changing the frequency if the GPU is sleeping (rc6).
	 * However, the GPU and driver is now idle and we do not want to delay
	 * switching to minimum voltage (reducing power whilst idle) as we do
	 * not expect to be woken in the near future and so must flush the
	 * change by waking the device.
	 *
	 * We choose to take the media powerwell (either would do to trick the
	 * punit into committing the voltage change) as that takes a lot less
	 * power than the render powerwell.
	 */
	intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
	rps_set(rps, rps->idle_freq, false);
	intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

	/*
	 * Since we will try and restart from the previously requested
	 * frequency on unparking, treat this idle point as a downclock
	 * interrupt and reduce the frequency for resume. If we park/unpark
	 * more frequently than the rps worker can run, we will not respond
	 * to any EI and never see a change in frequency.
	 *
	 * (Note we accommodate Cherryview's limitation of only using an
	 * even bin by applying it to all.)
	 */
	adj = rps->last_adj;
	if (adj < 0)
		adj *= 2;
	else /* CHV needs even encode values */
		adj = -2;
	rps->last_adj = adj;
	rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
	if (rps->cur_freq < rps->efficient_freq) {
		rps->cur_freq = rps->efficient_freq;
		rps->last_adj = 0;
	}

	GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

/* Report the current boost frequency, from SLPC or the legacy RPS state. */
u32 intel_rps_get_boost_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return slpc->boost_freq;
	} else {
		return intel_gpu_freq(rps, rps->boost_freq);
	}
}

/* Validate and store a new legacy boost frequency; kick work if waiters. */
static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
{
	bool boost = false;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return 0;
}

/* Set the boost frequency, dispatching to SLPC when it is in control. */
int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		return intel_guc_slpc_set_boost_freq(slpc, freq);
	} else {
		return rps_set_boost_freq(rps, freq);
	}
}

/* Drop one boost-waiter reference (SLPC or legacy). */
void intel_rps_dec_waiters(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc;

	if (rps_uses_slpc(rps)) {
		slpc = rps_to_slpc(rps);

		intel_guc_slpc_dec_waiters(slpc);
	} else {
		atomic_dec(&rps->num_waiters);
	}
}

/*
 * Waitboost: raise the GPU frequency on behalf of a request being
 * waited upon. At most one boost per request, serialized against
 * retirement via the I915_FENCE_FLAG_BOOST bit.
 */
void intel_rps_boost(struct i915_request *rq)
{
	struct intel_guc_slpc *slpc;

	if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
		return;

	/* Serializes with i915_request_retire() */
	if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
		struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

		if (rps_uses_slpc(rps)) {
			slpc = rps_to_slpc(rps);

			/* No headroom: boosting cannot exceed the softlimit */
			if (slpc->min_freq_softlimit >= slpc->boost_freq)
				return;

			/* Return if old value is non zero */
			if (!atomic_fetch_inc(&slpc->num_waiters)) {
				GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
					 rq->fence.context, rq->fence.seqno);
				schedule_work(&slpc->boost_work);
			}

			return;
		}

		if (atomic_fetch_inc(&rps->num_waiters))
			return;

		if (!intel_rps_is_active(rps))
			return;

		GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
			 rq->fence.context, rq->fence.seqno);

		if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
			schedule_work(&rps->work);

		WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
	}
}

/* Set the current RPS frequency; caller holds rps->lock. */
int intel_rps_set(struct intel_rps *rps, u8 val)
{
	int err;

	lockdep_assert_held(&rps->lock);
	GEM_BUG_ON(val > rps->max_freq);
	GEM_BUG_ON(val < rps->min_freq);

	if (intel_rps_is_active(rps)) {
		err = rps_set(rps, val, true);
		if (err)
			return err;

		/*
		 * Make sure we continue to get interrupts
		 * until we hit the minimum or maximum frequencies.
		 */
		if (intel_rps_has_interrupts(rps)) {
			struct intel_uncore *uncore = rps_to_uncore(rps);

			set(uncore,
			    GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

			set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
		}
	}

	rps->cur_freq = val;
	return 0;
}

/* Read the RP_STATE_CAP register from its platform-specific location. */
static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);

	if (IS_PONTEVECCHIO(i915))
		return intel_uncore_read(uncore, PVC_RP_STATE_CAP);
	else if (IS_XEHPSDV(i915))
		return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
	else if (IS_GEN9_LP(i915))
		return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
	else
		return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
}

/* Read MTL frequency caps; the media GT has its own cap/RPE registers. */
static void
mtl_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ?
		intel_uncore_read(uncore, MTL_MEDIAP_STATE_CAP) :
		intel_uncore_read(uncore, MTL_RP_STATE_CAP);
	u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ?
		intel_uncore_read(uncore, MTL_MPE_FREQUENCY) :
		intel_uncore_read(uncore, MTL_GT_RPE_FREQUENCY);

	/* MTL values are in units of 16.67 MHz */
	caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap);
	caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap);
	caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe);
}

/* Decode pre-MTL frequency caps; the bit layout differs per platform. */
static void
__gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 rp_state_cap;

	rp_state_cap = intel_rps_read_state_cap(rps);

	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
	if (IS_GEN9_LP(i915)) {
		caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
		caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 0) & 0xff;
	} else {
		caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
		if (GRAPHICS_VER(i915) >= 10)
			caps->rp1_freq = REG_FIELD_GET(RPE_MASK,
						       intel_uncore_read(to_gt(i915)->uncore,
						       GEN10_FREQ_INFO_REC));
		else
			caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
		caps->min_freq = (rp_state_cap >> 16) & 0xff;
	}

	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/*
		 * In this case rp_state_cap register reports frequencies in
		 * units of 50 MHz. Convert these to the actual "hw unit", i.e.
		 * units of 16.67 MHz
		 */
		caps->rp0_freq *= GEN9_FREQ_SCALER;
		caps->rp1_freq *= GEN9_FREQ_SCALER;
		caps->min_freq *= GEN9_FREQ_SCALER;
	}
}

/**
 * gen6_rps_get_freq_caps - Get freq caps exposed by HW
 * @rps: the intel_rps structure
 * @caps: returned freq caps
 *
 * Returned "caps" frequencies should be converted to MHz using
 * intel_gpu_freq()
 */
void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (IS_METEORLAKE(i915))
		return mtl_get_freq_caps(rps, caps);
	else
		return __gen6_rps_get_freq_caps(rps, caps);
}

/* Seed gen6+ RPS limits from the hardware caps and the pcode RPe value. */
static void gen6_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_rps_freq_caps caps;

	gen6_rps_get_freq_caps(rps, &caps);
	rps->rp0_freq = caps.rp0_freq;
	rps->rp1_freq = caps.rp1_freq;
	rps->min_freq = caps.min_freq;

	/* hw_max = RP0 until we check for overclocking */
	rps->max_freq = rps->rp0_freq;

	rps->efficient_freq = rps->rp1_freq;
	if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
	    IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		u32 ddcc_status = 0;
		u32 mult = 1;

		if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
			mult = GEN9_FREQ_SCALER;
		/* Prefer the pcode-reported efficient frequency, clamped */
		if (snb_pcode_read(rps_to_gt(rps)->uncore,
				   HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
				   &ddcc_status, NULL) == 0)
			rps->efficient_freq =
				clamp_t(u32,
					((ddcc_status >> 8) & 0xff) * mult,
					rps->min_freq,
					rps->max_freq);
	}
}

/* Force the next rps_set() to reprogram everything, then drop to min. */
static bool rps_reset(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* force a reset */
	rps->power.mode = -1;
	rps->last_freq = -1;

	if (rps_set(rps, rps->min_freq, true)) {
		drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
1207 return false; 1208 } 1209 1210 rps->cur_freq = rps->min_freq; 1211 return true; 1212 } 1213 1214 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 1215 static bool gen9_rps_enable(struct intel_rps *rps) 1216 { 1217 struct intel_gt *gt = rps_to_gt(rps); 1218 struct intel_uncore *uncore = gt->uncore; 1219 1220 /* Program defaults and thresholds for RPS */ 1221 if (GRAPHICS_VER(gt->i915) == 9) 1222 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1223 GEN9_FREQUENCY(rps->rp1_freq)); 1224 1225 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); 1226 1227 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1228 1229 return rps_reset(rps); 1230 } 1231 1232 static bool gen8_rps_enable(struct intel_rps *rps) 1233 { 1234 struct intel_uncore *uncore = rps_to_uncore(rps); 1235 1236 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1237 HSW_FREQUENCY(rps->rp1_freq)); 1238 1239 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1240 1241 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1242 1243 return rps_reset(rps); 1244 } 1245 1246 static bool gen6_rps_enable(struct intel_rps *rps) 1247 { 1248 struct intel_uncore *uncore = rps_to_uncore(rps); 1249 1250 /* Power down if completely idle for over 50ms */ 1251 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); 1252 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1253 1254 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1255 GEN6_PM_RP_DOWN_THRESHOLD | 1256 GEN6_PM_RP_DOWN_TIMEOUT); 1257 1258 return rps_reset(rps); 1259 } 1260 1261 static int chv_rps_max_freq(struct intel_rps *rps) 1262 { 1263 struct drm_i915_private *i915 = rps_to_i915(rps); 1264 struct intel_gt *gt = rps_to_gt(rps); 1265 u32 val; 1266 1267 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1268 1269 switch (gt->info.sseu.eu_total) { 1270 case 8: 1271 /* (2 * 4) config */ 1272 val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; 1273 break; 1274 case 12: 1275 /* (2 * 6) config 
		 */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
		break;
	case 16:
		/* (2 * 8) config */
	default:
		/* Setting (2 * 8) Min RP0 for any other combination */
		val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
		break;
	}

	return val & FB_GFX_FREQ_FUSE_MASK;
}

/* Read the RPe (efficient) frequency from the punit duty-cycle register */
static int chv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
	val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

	return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

/* Guaranteed (RP1) frequency: low field of the FMAX fuse */
static int chv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

	return val & FB_GFX_FREQ_FUSE_MASK;
}

/* Minimum (RPn) frequency from the FMIN fuse */
static u32 chv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
	val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

	return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* 1: Program defaults and thresholds for RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 2: Enable RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_AVG);

	rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
			  GEN6_PM_RP_DOWN_THRESHOLD |
			  GEN6_PM_RP_DOWN_TIMEOUT);

	/* Setting Fixed Bias */
	vlv_punit_get(i915);

	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n",
		str_yes_no(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

/* VLV guaranteed (RP1) frequency from the NC freq fuse */
static int vlv_rps_guar_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp1;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
	rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

	return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rp0;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

/* RPe is split across two fuse registers: 5 low bits + high bits */
static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val, rpe;

	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
	/*
	 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
	 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
	 * a BYT-M B0 the above register contains 0xbf. Moreover when setting
	 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
	 * to make sure it matches what Punit accepts.
	 */
	return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	/* Program defaults and thresholds for RPS */
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
	intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
	intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

	intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

	intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
			      GEN6_RP_MEDIA_TURBO |
			      GEN6_RP_MEDIA_HW_NORMAL_MODE |
			      GEN6_RP_MEDIA_IS_GFX |
			      GEN6_RP_ENABLE |
			      GEN6_RP_UP_BUSY_AVG |
			      GEN6_RP_DOWN_IDLE_CONT);

	/* WaGsvRC0ResidencyMethod:vlv */
	rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

	vlv_punit_get(i915);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

	vlv_punit_put(i915);

	/* RPS code assumes GPLL is used */
	drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
		      "GPLL not enabled\n");

	drm_dbg(&i915->drm, "GPLL enabled? %s\n",
		str_yes_no(val & GPLLENABLE));
	drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

	return rps_reset(rps);
}

/*
 * Estimate current graphics power draw (mW) for ilk-era IPS, derived
 * from the programmed voltage-id and empirically derived temperature
 * correction factors. Caller must hold mchdev_lock.
 */
static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
	struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	unsigned int t, state1, state2;
	u32 pxvid, ext_v;
	u64 corr, corr2;

	lockdep_assert_held(&mchdev_lock);

	pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

	state1 = ext_v;

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	t = ips_mch_val(uncore);
	if (t > 80)
		corr = t * 2349 + 135940;
	else if (t >= 50)
		corr = t * 964 + 29317;
	else /* < 50 */
		corr = t * 301 + 1004;

	corr = div_u64(corr * 150142 * state1, 10000) - 78642;
	corr2 = div_u64(corr, 100000) * ips->corr;

	state2 = div_u64(corr2 * state1, 10000);
	state2 /= 100; /* convert to mW */

	__gen5_ips_update(ips);

	return ips->gfx_power + state2;
}

/* True only if every engine can report busyness via its stats */
static bool has_busy_stats(struct intel_rps *rps)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, rps_to_gt(rps), id) {
		if (!intel_engine_supports_stats(engine))
			return false;
	}

	return true;
}

void intel_rps_enable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	bool enabled = false;

	if (!HAS_RPS(i915))
		return;

	/* GuC SLPC owns frequency management instead of host RPS */
	if (rps_uses_slpc(rps))
		return;

	intel_gt_check_clock_frequency(rps_to_gt(rps));

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	if (rps->max_freq <= rps->min_freq)
		/* leave disabled, no room
		   for dynamic reclocking */;
	else if (IS_CHERRYVIEW(i915))
		enabled = chv_rps_enable(rps);
	else if (IS_VALLEYVIEW(i915))
		enabled = vlv_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 9)
		enabled = gen9_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 8)
		enabled = gen8_rps_enable(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		enabled = gen6_rps_enable(rps);
	else if (IS_IRONLAKE_M(i915))
		enabled = gen5_rps_enable(rps);
	else
		MISSING_CASE(GRAPHICS_VER(i915));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	if (!enabled)
		return;

	GT_TRACE(rps_to_gt(rps),
		 "min:%x, max:%x, freq:[%d, %d]\n",
		 rps->min_freq, rps->max_freq,
		 intel_gpu_freq(rps, rps->min_freq),
		 intel_gpu_freq(rps, rps->max_freq));

	GEM_BUG_ON(rps->max_freq < rps->min_freq);
	GEM_BUG_ON(rps->idle_freq > rps->max_freq);

	GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
	GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

	/*
	 * Prefer the timer-driven autotune when engine busy-stats are
	 * available; otherwise fall back to hardware PM interrupts.
	 */
	if (has_busy_stats(rps))
		intel_rps_set_timer(rps);
	else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11)
		intel_rps_set_interrupts(rps);
	else
		/* Ironlake currently uses intel_ips.ko */ {}

	intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
	set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (!intel_rps_is_enabled(rps))
		return;

	intel_rps_clear_enabled(rps);
	intel_rps_clear_interrupts(rps);
	intel_rps_clear_timer(rps);

	if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_disable(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_disable(rps);
}

/* Convert a BYT opcode to MHz */
static int byt_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

/* Inverse of byt_gpu_freq(): MHz to opcode */
static int byt_freq_opcode(struct intel_rps *rps, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

/* Convert a CHV opcode to MHz */
static int chv_gpu_freq(struct intel_rps *rps, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

/* Inverse of chv_gpu_freq(): MHz to opcode */
static int chv_freq_opcode(struct intel_rps *rps, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

/* Translate a hardware frequency encoding ("opcode") into MHz */
int intel_gpu_freq(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(i915))
		return chv_gpu_freq(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_gpu_freq(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return val * GT_FREQUENCY_MULTIPLIER;
	else
		return val;
}

/* Translate MHz into the hardware frequency encoding ("opcode") */
int intel_freq_opcode(struct intel_rps *rps, int val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	if (GRAPHICS_VER(i915) >= 9)
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(i915))
		return chv_freq_opcode(rps, val);
	else if (IS_VALLEYVIEW(i915))
		return byt_freq_opcode(rps, val);
	else if (GRAPHICS_VER(i915) >= 6)
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
	else
		return val;
}

static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	rps->gpll_ref_freq =
		vlv_get_cck_clock(i915, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  i915->czclk_freq);

	drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
		rps->gpll_ref_freq);
}

/* Read the VLV frequency fuses and derive the rps operating range */
static void vlv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		i915->mem_freq = 800;
		break;
	case 2:
		i915->mem_freq = 1066;
		break;
	case 3:
		i915->mem_freq = 1333;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = vlv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = vlv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = vlv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = vlv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));
}

/* Read the CHV frequency fuses and derive the rps operating range */
static void chv_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 val;

	vlv_iosf_sb_get(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	vlv_init_gpll_ref_freq(rps);

	val = vlv_cck_read(i915, CCK_FUSE_REG);

	switch ((val >> 2) & 0x7) {
	case 3:
		i915->mem_freq = 2000;
		break;
	default:
		i915->mem_freq = 1600;
		break;
	}
	drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);

	rps->max_freq = chv_rps_max_freq(rps);
	rps->rp0_freq = rps->max_freq;
	drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->max_freq), rps->max_freq);

	rps->efficient_freq = chv_rps_rpe_freq(rps);
	drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);

	rps->rp1_freq = chv_rps_guar_freq(rps);
	drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);

	rps->min_freq = chv_rps_min_freq(rps);
	drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
		intel_gpu_freq(rps, rps->min_freq), rps->min_freq);

	vlv_iosf_sb_put(i915,
			BIT(VLV_IOSF_SB_PUNIT) |
			BIT(VLV_IOSF_SB_NC) |
			BIT(VLV_IOSF_SB_CCK));

	/* CHV frequency opcodes must be even; see chv_freq_opcode() */
	drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
				   rps->rp1_freq | rps->min_freq) & 1,
		      "Odd GPU freq values\n");
}

/* Snapshot the render/media C0 residency counters with a raw timestamp */
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
}

/*
 * WaGsvRC0ResidencyMethod:vlv - derive up/down threshold events from
 * C0 residency deltas instead of the hardware up/down interrupts.
 */
static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(uncore, &now);

	if (prev->ktime) {
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= rps_to_i915(rps)->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa.
		 * To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

/*
 * Deferred worker that reacts to the PM interrupt events (and client
 * boosts) by stepping the requested frequency up or down, doubling the
 * step on consecutive adjustments in the same direction.
 */
static void rps_work(struct work_struct *work)
{
	struct intel_rps *rps = container_of(work, typeof(*rps), work);
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = rps_to_i915(rps);
	bool client_boost = false;
	int new_freq, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(gt->irq_lock);
	pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
	client_boost = atomic_read(&rps->num_waiters);
	spin_unlock_irq(gt->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	if (!pm_iir && !client_boost)
		goto out;

	mutex_lock(&rps->lock);
	if (!intel_rps_is_active(rps)) {
		mutex_unlock(&rps->lock);
		return;
	}

	pm_iir |= vlv_wa_c0_ei(rps, pm_iir);

	adj = rps->last_adj;
	new_freq = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;

	GT_TRACE(gt,
		 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
		 pm_iir, str_yes_no(client_boost),
		 adj, new_freq, min, max);

	if (client_boost && new_freq < rps->boost_freq) {
		new_freq = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;

		if (new_freq >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		/* idle timeout: fall back to efficient, then to min */
		if (rps->cur_freq > rps->efficient_freq)
			new_freq = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_freq = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;

		if (new_freq <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	/*
	 * sysfs frequency limits may have snuck in while
	 * servicing the interrupt
	 */
	new_freq += adj;
	new_freq = clamp_t(int, new_freq, min, max);

	if (intel_rps_set(rps, new_freq)) {
		drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
		adj = 0;
	}
	rps->last_adj = adj;

	mutex_unlock(&rps->lock);

out:
	/* re-enable the PM interrupts masked in the irq handler */
	spin_lock_irq(gt->irq_lock);
	gen6_gt_pm_unmask_irq(gt, rps->pm_events);
	spin_unlock_irq(gt->irq_lock);
}

void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	const u32 events = rps->pm_events & pm_iir;

	lockdep_assert_held(gt->irq_lock);

	if (unlikely(!events))
		return;

	GT_TRACE(gt, "irq events:%x\n", events);

	/* mask until rps_work() has processed and unmasked them */
	gen6_gt_pm_mask_irq(gt, events);

	rps->pm_iir |= events;
	schedule_work(&rps->work);
}

void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
	struct intel_gt *gt = rps_to_gt(rps);
	u32 events;

	events = pm_iir & rps->pm_events;
	if (events) {
		spin_lock(gt->irq_lock);

		GT_TRACE(gt, "irq events:%x\n", events);

		/* mask until rps_work() has processed and unmasked them */
		gen6_gt_pm_mask_irq(gt, events);
		rps->pm_iir |= events;

		schedule_work(&rps->work);
		spin_unlock(gt->irq_lock);
	}

	if (GRAPHICS_VER(gt->i915) >= 8)
		return;

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
		drm_dbg(&rps_to_i915(rps)->drm,
			"Command parser error, pm_iir 0x%08x\n", pm_iir);
}

/*
 * Ironlake: hardware raises an interrupt asking for a single-step
 * frequency change based on its busyness averages.
 */
void gen5_rps_irq_handler(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_freq;

	spin_lock(&mchdev_lock);

	/* ack outstanding memory interrupt status (presumably w1c) */
	intel_uncore_write16(uncore,
			     MEMINTRSTS,
			     intel_uncore_read(uncore, MEMINTRSTS));

	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
	min_avg = intel_uncore_read(uncore, RCBMINAVG);

	/* Handle RCS change request from hw */
	new_freq = rps->cur_freq;
	if (busy_up > max_avg)
		new_freq++;
	else if (busy_down < min_avg)
		new_freq--;
	new_freq = clamp(new_freq,
			 rps->min_freq_softlimit,
			 rps->max_freq_softlimit);

	if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
		rps->cur_freq = new_freq;

	spin_unlock(&mchdev_lock);
}

void intel_rps_init_early(struct intel_rps *rps)
{
	mutex_init(&rps->lock);
	mutex_init(&rps->power.mutex);

	INIT_WORK(&rps->work, rps_work);
	timer_setup(&rps->timer, rps_timer, 0);

	atomic_set(&rps->num_waiters, 0);
}

void intel_rps_init(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);

	/* GuC SLPC owns frequency management instead of host RPS */
	if (rps_uses_slpc(rps))
		return;

	if (IS_CHERRYVIEW(i915))
		chv_rps_init(rps);
	else if (IS_VALLEYVIEW(i915))
		vlv_rps_init(rps);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_rps_init(rps);
	else if (IS_IRONLAKE_M(i915))
		gen5_rps_init(rps);

	/* Derive initial user preferences/limits from the hardware limits */
	rps->max_freq_softlimit = rps->max_freq;
	rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit;
	rps->min_freq_softlimit = rps->min_freq;
	rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit;

	/* After setting max-softlimit, find the overclock max freq */
	if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
		u32 params = 0;

		snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS,
			       &params, NULL);
		if (params & BIT(31)) { /* OC supported */
			drm_dbg(&i915->drm,
				"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
				(rps->max_freq & 0xff) * 50,
				(params & 0xff) * 50);
			rps->max_freq = params & 0xff;
		}
	}

	/* Finally allow us to boost to max by default */
	rps->boost_freq = rps->max_freq;
	rps->idle_freq = rps->min_freq;

	/* Start in the middle, from here we will autotune based on workload */
	rps->cur_freq = rps->efficient_freq;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (GRAPHICS_VER(i915) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	/* GuC needs ARAT expired interrupt unmasked */
	if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
		rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
}

void intel_rps_sanitize(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return;

	if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
		rps_disable_interrupts(rps);
}

/* Read RPSTAT without taking forcewake (caller must hold it if needed) */
u32 intel_rps_read_rpstat_fw(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	i915_reg_t rpstat;

	rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;

	return intel_uncore_read_fw(rps_to_gt(rps)->uncore, rpstat);
}

u32 intel_rps_read_rpstat(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	i915_reg_t rpstat;

	rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;

	return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
}

/* Extract the current actual GPU frequency (CAGF) field from rpstat */
u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	u32 cagf;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		cagf = REG_FIELD_GET(MTL_CAGF_MASK, rpstat);
	else if (GRAPHICS_VER(i915) >= 12)
		cagf = REG_FIELD_GET(GEN12_CAGF_MASK, rpstat);
	else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		cagf = REG_FIELD_GET(RPE_MASK, rpstat);
	else if (GRAPHICS_VER(i915) >= 9)
		cagf = REG_FIELD_GET(GEN9_CAGF_MASK, rpstat);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		cagf = REG_FIELD_GET(HSW_CAGF_MASK, rpstat);
	else if (GRAPHICS_VER(i915) >= 6)
		cagf = REG_FIELD_GET(GEN6_CAGF_MASK, rpstat);
	else
		cagf = gen5_invert_freq(rps, REG_FIELD_GET(MEMSTAT_PSTATE_MASK,
							   rpstat));

	return cagf;
}

/* Read the platform-appropriate status register and decode the CAGF */
static u32 read_cagf(struct intel_rps *rps)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 freq;

	/*
	 * For Gen12+ reading freq from HW does not need a forcewake and
	 * registers will return 0 freq when GT is in RC6
	 */
	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		freq = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
	} else if (GRAPHICS_VER(i915) >= 12) {
		freq = intel_uncore_read(uncore, GEN12_RPSTAT1);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		vlv_punit_get(i915);
		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);
	} else if (GRAPHICS_VER(i915) >= 6) {
		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
	} else {
		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
	}

	return intel_rps_get_cagf(rps, freq);
}

/* Actual frequency in MHz; 0 if the device is runtime suspended */
u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
{
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_gpu_freq(rps, read_cagf(rps));

	return freq;
}

/* Raw punit frequency request; 0 if the device is runtime suspended */
u32 intel_rps_read_punit_req(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
	intel_wakeref_t wakeref;
	u32 freq = 0;

	with_intel_runtime_pm_if_in_use(rpm, wakeref)
		freq = intel_uncore_read(uncore, GEN6_RPNSWREQ);

	return freq;
}

/* Extract the unslice frequency-ratio field from a punit request value */
static u32 intel_rps_get_req(u32 pureq)
{
	u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;

	return req;
}

u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
{
	u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));

	return intel_gpu_freq(rps, freq);
}

u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
{
	if (rps_uses_slpc(rps))
		return intel_rps_read_punit_req_frequency(rps);
	else
		return intel_gpu_freq(rps, rps->cur_freq);
}

u32 intel_rps_get_max_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->max_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->max_freq_softlimit);
}

/**
 * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
 * @rps: the intel_rps structure
 *
 * Returns the max frequency in a raw format. In newer platforms raw is in
 * units of 50 MHz.
 */
u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
	u32 freq;

	if (rps_uses_slpc(rps)) {
		return DIV_ROUND_CLOSEST(slpc->rp0_freq,
					 GT_FREQUENCY_MULTIPLIER);
	} else {
		freq = rps->max_freq;
		if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
			/* Convert GT frequency to 50 MHz units */
			freq /= GEN9_FREQ_SCALER;
		}
		return freq;
	}
}

/* RP0 (max non-overclocked) frequency in MHz */
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp0_freq;
	else
		return intel_gpu_freq(rps, rps->rp0_freq);
}

/* RP1 (guaranteed/nominal) frequency in MHz */
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->rp1_freq;
	else
		return intel_gpu_freq(rps, rps->rp1_freq);
}

/* RPn (minimum) frequency in MHz */
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq;
	else
		return intel_gpu_freq(rps, rps->min_freq);
}

static void rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps_freq_caps caps;
	u32 rp_state_limits;
	u32 gt_perf_status;
	u32 rpmodectl, rpinclimit, rpdeclimit;
	u32 rpstat, cagf, reqf;
	u32 rpcurupei, rpcurup, rpprevup;
	u32 rpcurdownei, rpcurdown, rpprevdown;
	u32 rpupei, rpupt, rpdownei, rpdownt;
	u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;

	rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
	gen6_rps_get_freq_caps(rps, &caps);
	if (IS_GEN9_LP(i915))
		gt_perf_status = intel_uncore_read(uncore,
							   BXT_GT_PERF_STATUS);
	else
		gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);

	/* RPSTAT1 is in the GT power well */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Decode the software frequency request; the field layout moved over gens */
	reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
	if (GRAPHICS_VER(i915) >= 9) {
		reqf >>= 23;
	} else {
		reqf &= ~GEN6_TURBO_DISABLE;
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			reqf >>= 24;
		else
			reqf >>= 25;
	}
	reqf = intel_gpu_freq(rps, reqf);

	rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
	rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
	rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

	rpstat = intel_rps_read_rpstat(rps);
	rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
	rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
	rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
	rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
	rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
	rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

	rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
	rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);

	rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
	rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

	cagf = intel_rps_read_actual_frequency(rps);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	if (GRAPHICS_VER(i915) >= 11) {
		pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
		pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
		/*
		 * The equivalent to the PM ISR & IIR cannot be read
		 * without affecting the current state of the system
		 */
		pm_isr = 0;
		pm_iir = 0;
	} else if (GRAPHICS_VER(i915) >= 8) {
		pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
		pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
		pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
		pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
	} else {
		pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
		pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
		pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
		pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
	}
	pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

	drm_printf(p, "Video Turbo Mode: %s\n",
		   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
	drm_printf(p, "HW control enabled: %s\n",
		   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
	drm_printf(p, "SW control enabled: %s\n",
		   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

	drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
		   pm_ier, pm_imr, pm_mask);
	if (GRAPHICS_VER(i915) <= 10)
		drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
			   pm_isr, pm_iir);
	drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
		   rps->pm_intrmsk_mbz);
	drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
	drm_printf(p, "Render p-state ratio: %d\n",
		   (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
	drm_printf(p, "Render p-state VID: %d\n",
		   gt_perf_status & 0xff);
	drm_printf(p, "Render p-state limit: %d\n",
		   rp_state_limits & 0xff);
	drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
	drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
	drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
	drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
	drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
	drm_printf(p, "CAGF: %dMHz\n", cagf);
	drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
		   rpcurupei,
		   intel_gt_pm_interval_to_ns(gt, rpcurupei));
	drm_printf(p, "RP CUR UP: %d (%lldns)\n",
		   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
	drm_printf(p, "RP PREV UP: %d (%lldns)\n",
		   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
	drm_printf(p, "Up threshold: %d%%\n",
		   rps->power.up_threshold);
	drm_printf(p, "RP UP EI: %d (%lldns)\n",
		   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
	drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
		   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));

	drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
		   rpcurdownei,
		   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
	drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
		   rpcurdown,
		   intel_gt_pm_interval_to_ns(gt, rpcurdown));
	drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
		   rpprevdown,
		   intel_gt_pm_interval_to_ns(gt, rpprevdown));
	drm_printf(p, "Down threshold: %d%%\n",
		   rps->power.down_threshold);
	drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
		   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
	drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
		   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));

	drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.min_freq));
	drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "Max overclocked frequency: %dMHz\n",
		   intel_gpu_freq(rps, rps->max_freq));

	drm_printf(p, "Current freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->cur_freq));
	drm_printf(p, "Actual freq: %d MHz\n", cagf);
	drm_printf(p, "Idle freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->idle_freq));
	drm_printf(p, "Min freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->min_freq));
	drm_printf(p, "Boost freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->boost_freq));
	drm_printf(p, "Max freq: %d MHz\n",
		   intel_gpu_freq(rps, rps->max_freq));
	drm_printf(p,
		   "efficient (RPe) frequency: %d MHz\n",
		   intel_gpu_freq(rps, rps->efficient_freq));
}

/*
 * Reduced frequency dump for when GuC SLPC owns frequency management:
 * only the registers and frequencies still meaningful under SLPC are
 * reported (the GEN6_RP_* evaluation-interval state is host-RPS only).
 */
static void slpc_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	struct intel_gt *gt = rps_to_gt(rps);
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps_freq_caps caps;
	u32 pm_mask;

	gen6_rps_get_freq_caps(rps, &caps);
	pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

	drm_printf(p, "PM MASK=0x%08x\n", pm_mask);
	drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
		   rps->pm_intrmsk_mbz);
	drm_printf(p, "RPSTAT1: 0x%08x\n", intel_rps_read_rpstat(rps));
	drm_printf(p, "RPNSWREQ: %dMHz\n", intel_rps_get_requested_frequency(rps));
	drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.min_freq));
	drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
		   intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "Current freq: %d MHz\n",
		   intel_rps_get_requested_frequency(rps));
	drm_printf(p, "Actual freq: %d MHz\n",
		   intel_rps_read_actual_frequency(rps));
	drm_printf(p, "Min freq: %d MHz\n",
		   intel_rps_get_min_frequency(rps));
	drm_printf(p, "Boost freq: %d MHz\n",
		   intel_rps_get_boost_frequency(rps));
	drm_printf(p, "Max freq: %d MHz\n",
		   intel_rps_get_max_frequency(rps));
	/*
	 * NOTE(review): RP1 is reported as the efficient (RPe) frequency
	 * here — presumably SLPC exposes no separate RPe; confirm against
	 * the SLPC interface before relying on this value.
	 */
	drm_printf(p,
		   "efficient (RPe) frequency: %d MHz\n",
		   intel_gpu_freq(rps, caps.rp1_freq));
}

/*
 * gen6_rps_frequency_dump - dump RPS frequency state to @p, dispatching
 * to the SLPC or legacy host-RPS variant depending on who owns
 * frequency management.
 */
void gen6_rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p)
{
	if (rps_uses_slpc(rps))
		return slpc_frequency_dump(rps, p);
	else
		return rps_frequency_dump(rps, p);
}

/*
 * Set the user's max-frequency softlimit (legacy host-RPS path).
 * @val is in MHz and is converted to the platform's internal frequency
 * encoding by intel_freq_opcode() before range-checking.
 * Returns 0 on success, -EINVAL if @val is outside [min, max] or below
 * the current min softlimit.
 */
static int set_max_freq(struct intel_rps *rps, u32 val)
{
	struct drm_i915_private *i915 = rps_to_i915(rps);
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	/* Raising above RP0 is overclocking; leave a trace in the logs */
	if (val > rps->rp0_freq)
		drm_dbg(&i915->drm, "User requested overclocking to %d\n",
			intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

/*
 * Public entry point for setting the max frequency; routed to SLPC
 * when GuC owns frequency management, otherwise to the host-RPS path.
 */
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_max_freq(slpc, val);
	else
		return set_max_freq(rps, val);
}

/*
 * Return the current min-frequency softlimit in MHz. The SLPC softlimit
 * is presumably already in MHz (no conversion applied) — the host-RPS
 * value is converted via intel_gpu_freq(); confirm against SLPC code.
 */
u32 intel_rps_get_min_frequency(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return slpc->min_freq_softlimit;
	else
		return intel_gpu_freq(rps, rps->min_freq_softlimit);
}

/**
 * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
 * @rps: the intel_rps structure
 *
 * Returns the min frequency in a raw format. In newer platforms raw is in
 * units of 50 MHz.
 */
u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
	u32 freq;

	if (rps_uses_slpc(rps)) {
		return DIV_ROUND_CLOSEST(slpc->min_freq,
					 GT_FREQUENCY_MULTIPLIER);
	} else {
		freq = rps->min_freq;
		if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
			/* Convert GT frequency to 50 MHz units */
			freq /= GEN9_FREQ_SCALER;
		}
		return freq;
	}
}

/*
 * Set the user's min-frequency softlimit (legacy host-RPS path).
 * Mirrors set_max_freq(): @val is in MHz, converted and range-checked;
 * returns 0 or -EINVAL.
 */
static int set_min_freq(struct intel_rps *rps, u32 val)
{
	int ret = 0;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret;
}

/*
 * Public entry point for setting the min frequency; routed to SLPC
 * when GuC owns frequency management, otherwise to the host-RPS path.
 */
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
{
	struct intel_guc_slpc *slpc = rps_to_slpc(rps);

	if (rps_uses_slpc(rps))
		return intel_guc_slpc_set_min_freq(slpc, val);
	else
		return set_min_freq(rps, val);
}

/*
 * Toggle software (manual) RP control via GEN6_RP_CONTROL so that a
 * direct GEN6_RPNSWREQ write below takes effect.
 */
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);
	u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE;

	/* Allow punit to process software requests */
	intel_uncore_write(uncore, GEN6_RP_CONTROL, state);
}

/*
 * Request the maximum (RP0) unslice frequency. On the SLPC path this
 * writes GEN6_RPNSWREQ directly under a temporary manual-control
 * window; on the host path it goes through intel_rps_set().
 */
void intel_rps_raise_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.rp0_freq <<
				   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				   GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->rp0_freq);
	}

	mutex_unlock(&rps->lock);
}

/*
 * Request the minimum (RPn) unslice frequency; mirror image of
 * intel_rps_raise_unslice().
 */
void intel_rps_lower_unslice(struct intel_rps *rps)
{
	struct intel_uncore *uncore = rps_to_uncore(rps);

	mutex_lock(&rps->lock);

	if (rps_uses_slpc(rps)) {
		/* RP limits have not been initialized yet for SLPC path */
		struct intel_rps_freq_caps caps;

		gen6_rps_get_freq_caps(rps, &caps);

		intel_rps_set_manual(rps, true);
		intel_uncore_write(uncore, GEN6_RPNSWREQ,
				   ((caps.min_freq <<
				   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
				   GEN9_IGNORE_SLICE_RATIO));
		intel_rps_set_manual(rps, false);
	} else {
		intel_rps_set(rps, rps->min_freq);
	}

	mutex_unlock(&rps->lock);
}

/* Read a register under a temporary runtime-pm wakeref. */
static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
{
	struct intel_gt *gt = rps_to_gt(rps);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		val = intel_uncore_read(gt->uncore, reg32);

	return val;
}

/* Test @mask against a register read, waking the device as needed. */
bool rps_read_mask_mmio(struct intel_rps *rps,
			i915_reg_t reg32, u32 mask)
{
	return rps_read_mmio(rps, reg32) & mask;
}

/* External interface for intel_ips.ko */

/* i915 device published for intel_ips; NULL until registered (RCU protected) */
static struct drm_i915_private __rcu *ips_mchdev;

/**
 * ips_ping_for_i915_load - notify intel_ips that i915 has loaded
 *
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	/* symbol_get() only succeeds if intel_ips is already loaded */
	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_rps_driver_register(struct intel_rps *rps)
{
	struct intel_gt *gt = rps_to_gt(rps);

	/*
	 * We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values.
	 */
	if (GRAPHICS_VER(gt->i915) == 5) {
		GEM_BUG_ON(ips_mchdev);
		rcu_assign_pointer(ips_mchdev, gt->i915);
		ips_ping_for_i915_load();
	}
}

void intel_rps_driver_unregister(struct intel_rps *rps)
{
	/* Revoke the device from intel_ips; readers use mchdev_get() */
	if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
		rcu_assign_pointer(ips_mchdev, NULL);
}

/*
 * Take a reference on the i915 device published for intel_ips, or NULL
 * if none is registered or the device is already being torn down
 * (kref_get_unless_zero() fails once the last reference is gone).
 * Caller must drop the reference with drm_dev_put().
 */
static struct drm_i915_private *mchdev_get(void)
{
	struct drm_i915_private *i915;

	rcu_read_lock();
	i915 = rcu_dereference(ips_mchdev);
	if (i915 && !kref_get_unless_zero(&i915->drm.ref))
		i915 = NULL;
	rcu_read_unlock();

	return i915;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *i915;
	unsigned long chipset_val = 0;
	unsigned long graphics_val = 0;
	intel_wakeref_t wakeref;

	i915 = mchdev_get();
	if (!i915)
		return 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		struct intel_ips *ips = &to_gt(i915)->rps.ips;

		/* mchdev_lock serialises the IPS bookkeeping */
		spin_lock_irq(&mchdev_lock);
		chipset_val = __ips_chipset_val(ips);
		graphics_val = __ips_gfx_val(ips);
		spin_unlock_irq(&mchdev_lock);
	}

	drm_dev_put(&i915->drm);
	return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit < rps->max_freq)
		rps->max_freq_softlimit++;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	if (rps->max_freq_softlimit > rps->min_freq)
		rps->max_freq_softlimit--;
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *i915;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	ret = to_gt(i915)->awake;

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *i915;
	struct intel_rps *rps;
	bool ret;

	i915 = mchdev_get();
	if (!i915)
		return false;

	rps = &to_gt(i915)->rps;

	spin_lock_irq(&mchdev_lock);
	rps->max_freq_softlimit = rps->min_freq;
	/* &to_gt(i915)->rps is the same object as rps here */
	ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq);
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);
	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_rps.c"
#include "selftest_slpc.c"
#endif