1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include <drm/i915_drm.h> 9 10 #include "display/intel_display.h" 11 #include "i915_drv.h" 12 #include "i915_irq.h" 13 #include "intel_breadcrumbs.h" 14 #include "intel_gt.h" 15 #include "intel_gt_clock_utils.h" 16 #include "intel_gt_irq.h" 17 #include "intel_gt_pm.h" 18 #include "intel_gt_pm_irq.h" 19 #include "intel_gt_print.h" 20 #include "intel_gt_regs.h" 21 #include "intel_mchbar_regs.h" 22 #include "intel_pcode.h" 23 #include "intel_rps.h" 24 #include "vlv_sideband.h" 25 #include "../../../platform/x86/intel_ips.h" 26 27 #define BUSY_MAX_EI 20u /* ms */ 28 29 /* 30 * Lock protecting IPS related data structures 31 */ 32 static DEFINE_SPINLOCK(mchdev_lock); 33 34 static struct intel_gt *rps_to_gt(struct intel_rps *rps) 35 { 36 return container_of(rps, struct intel_gt, rps); 37 } 38 39 static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) 40 { 41 return rps_to_gt(rps)->i915; 42 } 43 44 static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) 45 { 46 return rps_to_gt(rps)->uncore; 47 } 48 49 static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps) 50 { 51 struct intel_gt *gt = rps_to_gt(rps); 52 53 return >->uc.guc.slpc; 54 } 55 56 static bool rps_uses_slpc(struct intel_rps *rps) 57 { 58 struct intel_gt *gt = rps_to_gt(rps); 59 60 return intel_uc_uses_guc_slpc(>->uc); 61 } 62 63 static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) 64 { 65 return mask & ~rps->pm_intrmsk_mbz; 66 } 67 68 static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 69 { 70 intel_uncore_write_fw(uncore, reg, val); 71 } 72 73 static void rps_timer(struct timer_list *t) 74 { 75 struct intel_rps *rps = from_timer(rps, t, timer); 76 struct intel_engine_cs *engine; 77 ktime_t dt, last, timestamp; 78 enum intel_engine_id id; 79 s64 max_busy[3] = {}; 80 81 timestamp = 0; 82 for_each_engine(engine, rps_to_gt(rps), id) { 83 s64 busy; 84 int i; 85 86 dt = intel_engine_get_busy_time(engine, ×tamp); 87 last = engine->stats.rps; 88 engine->stats.rps = dt; 89 90 busy = ktime_to_ns(ktime_sub(dt, last)); 91 for (i = 0; i < ARRAY_SIZE(max_busy); i++) { 92 if (busy > max_busy[i]) 93 swap(busy, max_busy[i]); 94 } 95 } 96 last = rps->pm_timestamp; 97 rps->pm_timestamp = timestamp; 98 99 if (intel_rps_is_active(rps)) { 100 s64 busy; 101 int i; 102 103 dt = ktime_sub(timestamp, last); 104 105 /* 106 * Our goal is to evaluate each engine independently, so we run 107 * at the lowest clocks required to sustain the heaviest 108 * workload. However, a task may be split into sequential 109 * dependent operations across a set of engines, such that 110 * the independent contributions do not account for high load, 111 * but overall the task is GPU bound. For example, consider 112 * video decode on vcs followed by colour post-processing 113 * on vecs, followed by general post-processing on rcs. 114 * Since multi-engines being active does imply a single 115 * continuous workload across all engines, we hedge our 116 * bets by only contributing a factor of the distributed 117 * load into our busyness calculation. 
118 */ 119 busy = max_busy[0]; 120 for (i = 1; i < ARRAY_SIZE(max_busy); i++) { 121 if (!max_busy[i]) 122 break; 123 124 busy += div_u64(max_busy[i], 1 << i); 125 } 126 GT_TRACE(rps_to_gt(rps), 127 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", 128 busy, (int)div64_u64(100 * busy, dt), 129 max_busy[0], max_busy[1], max_busy[2], 130 rps->pm_interval); 131 132 if (100 * busy > rps->power.up_threshold * dt && 133 rps->cur_freq < rps->max_freq_softlimit) { 134 rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; 135 rps->pm_interval = 1; 136 schedule_work(&rps->work); 137 } else if (100 * busy < rps->power.down_threshold * dt && 138 rps->cur_freq > rps->min_freq_softlimit) { 139 rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; 140 rps->pm_interval = 1; 141 schedule_work(&rps->work); 142 } else { 143 rps->last_adj = 0; 144 } 145 146 mod_timer(&rps->timer, 147 jiffies + msecs_to_jiffies(rps->pm_interval)); 148 rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI); 149 } 150 } 151 152 static void rps_start_timer(struct intel_rps *rps) 153 { 154 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 155 rps->pm_interval = 1; 156 mod_timer(&rps->timer, jiffies + 1); 157 } 158 159 static void rps_stop_timer(struct intel_rps *rps) 160 { 161 del_timer_sync(&rps->timer); 162 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 163 cancel_work_sync(&rps->work); 164 } 165 166 static u32 rps_pm_mask(struct intel_rps *rps, u8 val) 167 { 168 u32 mask = 0; 169 170 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ 171 if (val > rps->min_freq_softlimit) 172 mask |= (GEN6_PM_RP_UP_EI_EXPIRED | 173 GEN6_PM_RP_DOWN_THRESHOLD | 174 GEN6_PM_RP_DOWN_TIMEOUT); 175 176 if (val < rps->max_freq_softlimit) 177 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 178 179 mask &= rps->pm_events; 180 181 return rps_pm_sanitize_mask(rps, ~mask); 182 } 183 184 static void rps_reset_ei(struct intel_rps *rps) 185 { 186 memset(&rps->ei, 0, sizeof(rps->ei)); 187 } 188 189 static void rps_enable_interrupts(struct intel_rps *rps) 190 { 191 struct intel_gt *gt = rps_to_gt(rps); 192 193 GEM_BUG_ON(rps_uses_slpc(rps)); 194 195 GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n", 196 rps->pm_events, rps_pm_mask(rps, rps->last_freq)); 197 198 rps_reset_ei(rps); 199 200 spin_lock_irq(gt->irq_lock); 201 gen6_gt_pm_enable_irq(gt, rps->pm_events); 202 spin_unlock_irq(gt->irq_lock); 203 204 intel_uncore_write(gt->uncore, 205 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); 206 } 207 208 static void gen6_rps_reset_interrupts(struct intel_rps *rps) 209 { 210 gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); 211 } 212 213 static void gen11_rps_reset_interrupts(struct intel_rps *rps) 214 { 215 while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) 216 ; 217 } 218 219 static void rps_reset_interrupts(struct intel_rps *rps) 220 { 221 struct intel_gt *gt = rps_to_gt(rps); 222 223 spin_lock_irq(gt->irq_lock); 224 if (GRAPHICS_VER(gt->i915) >= 11) 225 gen11_rps_reset_interrupts(rps); 226 else 227 gen6_rps_reset_interrupts(rps); 228 229 rps->pm_iir = 0; 230 spin_unlock_irq(gt->irq_lock); 231 } 232 233 static void rps_disable_interrupts(struct intel_rps *rps) 234 { 235 struct intel_gt *gt = rps_to_gt(rps); 236 237 intel_uncore_write(gt->uncore, 238 GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); 239 240 spin_lock_irq(gt->irq_lock); 241 gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); 242 spin_unlock_irq(gt->irq_lock); 243 244 intel_synchronize_irq(gt->i915); 245 246 /* 247 * Now 
that we will not be generating any more work, flush any 248 * outstanding tasks. As we are called on the RPS idle path, 249 * we will reset the GPU to minimum frequencies, so the current 250 * state of the worker can be discarded. 251 */ 252 cancel_work_sync(&rps->work); 253 254 rps_reset_interrupts(rps); 255 GT_TRACE(gt, "interrupts:off\n"); 256 } 257 258 static const struct cparams { 259 u16 i; 260 u16 t; 261 u16 m; 262 u16 c; 263 } cparams[] = { 264 { 1, 1333, 301, 28664 }, 265 { 1, 1066, 294, 24460 }, 266 { 1, 800, 294, 25192 }, 267 { 0, 1333, 276, 27605 }, 268 { 0, 1066, 276, 27605 }, 269 { 0, 800, 231, 23784 }, 270 }; 271 272 static void gen5_rps_init(struct intel_rps *rps) 273 { 274 struct drm_i915_private *i915 = rps_to_i915(rps); 275 struct intel_uncore *uncore = rps_to_uncore(rps); 276 u8 fmax, fmin, fstart; 277 u32 rgvmodectl; 278 int c_m, i; 279 280 if (i915->fsb_freq <= 3200) 281 c_m = 0; 282 else if (i915->fsb_freq <= 4800) 283 c_m = 1; 284 else 285 c_m = 2; 286 287 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 288 if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { 289 rps->ips.m = cparams[i].m; 290 rps->ips.c = cparams[i].c; 291 break; 292 } 293 } 294 295 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 296 297 /* Set up min, max, and cur for interrupt handling */ 298 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 299 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 300 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 301 MEMMODE_FSTART_SHIFT; 302 drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n", 303 fmax, fmin, fstart); 304 305 rps->min_freq = fmax; 306 rps->efficient_freq = fstart; 307 rps->max_freq = fmin; 308 } 309 310 static unsigned long 311 __ips_chipset_val(struct intel_ips *ips) 312 { 313 struct intel_uncore *uncore = 314 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 315 unsigned long now = jiffies_to_msecs(jiffies), dt; 316 unsigned long result; 317 u64 total, delta; 318 319 lockdep_assert_held(&mchdev_lock); 320 321 /* 322 * Prevent division-by-zero if we are asking too fast. 323 * Also, we don't get interesting results if we are polling 324 * faster than once in 10ms, so just return the saved value 325 * in such cases. 
326 */ 327 dt = now - ips->last_time1; 328 if (dt <= 10) 329 return ips->chipset_power; 330 331 /* FIXME: handle per-counter overflow */ 332 total = intel_uncore_read(uncore, DMIEC); 333 total += intel_uncore_read(uncore, DDREC); 334 total += intel_uncore_read(uncore, CSIEC); 335 336 delta = total - ips->last_count1; 337 338 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); 339 340 ips->last_count1 = total; 341 ips->last_time1 = now; 342 343 ips->chipset_power = result; 344 345 return result; 346 } 347 348 static unsigned long ips_mch_val(struct intel_uncore *uncore) 349 { 350 unsigned int m, x, b; 351 u32 tsfs; 352 353 tsfs = intel_uncore_read(uncore, TSFS); 354 x = intel_uncore_read8(uncore, TR1); 355 356 b = tsfs & TSFS_INTR_MASK; 357 m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; 358 359 return m * x / 127 - b; 360 } 361 362 static int _pxvid_to_vd(u8 pxvid) 363 { 364 if (pxvid == 0) 365 return 0; 366 367 if (pxvid >= 8 && pxvid < 31) 368 pxvid = 31; 369 370 return (pxvid + 2) * 125; 371 } 372 373 static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) 374 { 375 const int vd = _pxvid_to_vd(pxvid); 376 377 if (INTEL_INFO(i915)->is_mobile) 378 return max(vd - 1125, 0); 379 380 return vd; 381 } 382 383 static void __gen5_ips_update(struct intel_ips *ips) 384 { 385 struct intel_uncore *uncore = 386 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 387 u64 now, delta, dt; 388 u32 count; 389 390 lockdep_assert_held(&mchdev_lock); 391 392 now = ktime_get_raw_ns(); 393 dt = now - ips->last_time2; 394 do_div(dt, NSEC_PER_MSEC); 395 396 /* Don't divide by 0 */ 397 if (dt <= 10) 398 return; 399 400 count = intel_uncore_read(uncore, GFXEC); 401 delta = count - ips->last_count2; 402 403 ips->last_count2 = count; 404 ips->last_time2 = now; 405 406 /* More magic constants... 
*/ 407 ips->gfx_power = div_u64(delta * 1181, dt * 10); 408 } 409 410 static void gen5_rps_update(struct intel_rps *rps) 411 { 412 spin_lock_irq(&mchdev_lock); 413 __gen5_ips_update(&rps->ips); 414 spin_unlock_irq(&mchdev_lock); 415 } 416 417 static unsigned int gen5_invert_freq(struct intel_rps *rps, 418 unsigned int val) 419 { 420 /* Invert the frequency bin into an ips delay */ 421 val = rps->max_freq - val; 422 val = rps->min_freq + val; 423 424 return val; 425 } 426 427 static int __gen5_rps_set(struct intel_rps *rps, u8 val) 428 { 429 struct intel_uncore *uncore = rps_to_uncore(rps); 430 u16 rgvswctl; 431 432 lockdep_assert_held(&mchdev_lock); 433 434 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 435 if (rgvswctl & MEMCTL_CMD_STS) { 436 drm_dbg(&rps_to_i915(rps)->drm, 437 "gpu busy, RCS change rejected\n"); 438 return -EBUSY; /* still busy with another command */ 439 } 440 441 /* Invert the frequency bin into an ips delay */ 442 val = gen5_invert_freq(rps, val); 443 444 rgvswctl = 445 (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 446 (val << MEMCTL_FREQ_SHIFT) | 447 MEMCTL_SFCAVM; 448 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 449 intel_uncore_posting_read16(uncore, MEMSWCTL); 450 451 rgvswctl |= MEMCTL_CMD_STS; 452 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 453 454 return 0; 455 } 456 457 static int gen5_rps_set(struct intel_rps *rps, u8 val) 458 { 459 int err; 460 461 spin_lock_irq(&mchdev_lock); 462 err = __gen5_rps_set(rps, val); 463 spin_unlock_irq(&mchdev_lock); 464 465 return err; 466 } 467 468 static unsigned long intel_pxfreq(u32 vidfreq) 469 { 470 int div = (vidfreq & 0x3f0000) >> 16; 471 int post = (vidfreq & 0x3000) >> 12; 472 int pre = (vidfreq & 0x7); 473 474 if (!pre) 475 return 0; 476 477 return div * 133333 / (pre << post); 478 } 479 480 static unsigned int init_emon(struct intel_uncore *uncore) 481 { 482 u8 pxw[16]; 483 int i; 484 485 /* Disable to program */ 486 intel_uncore_write(uncore, ECR, 0); 487 intel_uncore_posting_read(uncore, ECR); 488 489 /* Program energy weights for various events */ 490 intel_uncore_write(uncore, SDEW, 0x15040d00); 491 intel_uncore_write(uncore, CSIEW0, 0x007f0000); 492 intel_uncore_write(uncore, CSIEW1, 0x1e220004); 493 intel_uncore_write(uncore, CSIEW2, 0x04000004); 494 495 for (i = 0; i < 5; i++) 496 intel_uncore_write(uncore, PEW(i), 0); 497 for (i = 0; i < 3; i++) 498 intel_uncore_write(uncore, DEW(i), 0); 499 500 /* Program P-state weights to account for frequency power adjustment */ 501 for (i = 0; i < 16; i++) { 502 u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); 503 unsigned int freq = intel_pxfreq(pxvidfreq); 504 unsigned int vid = 505 (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 506 unsigned int val; 507 508 val = vid * vid * freq / 1000 * 255; 509 val /= 127 * 127 * 900; 510 511 pxw[i] = val; 512 } 513 /* Render standby states get 0 weight */ 514 pxw[14] = 0; 515 pxw[15] = 0; 516 517 for (i = 0; i < 4; i++) { 518 intel_uncore_write(uncore, PXW(i), 519 pxw[i * 4 + 0] << 24 | 520 pxw[i * 4 + 1] << 16 | 521 pxw[i * 4 + 2] << 8 | 522 pxw[i * 4 + 3] << 0); 523 } 524 525 /* Adjust magic regs to magic values (more experimental results) */ 526 intel_uncore_write(uncore, OGW0, 0); 527 intel_uncore_write(uncore, OGW1, 0); 528 intel_uncore_write(uncore, EG0, 0x00007f00); 529 intel_uncore_write(uncore, EG1, 0x0000000e); 530 intel_uncore_write(uncore, EG2, 0x000e0000); 531 intel_uncore_write(uncore, EG3, 0x68000300); 532 intel_uncore_write(uncore, EG4, 0x42000000); 533 intel_uncore_write(uncore, EG5, 
0x00140031); 534 intel_uncore_write(uncore, EG6, 0); 535 intel_uncore_write(uncore, EG7, 0); 536 537 for (i = 0; i < 8; i++) 538 intel_uncore_write(uncore, PXWL(i), 0); 539 540 /* Enable PMON + select events */ 541 intel_uncore_write(uncore, ECR, 0x80000019); 542 543 return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; 544 } 545 546 static bool gen5_rps_enable(struct intel_rps *rps) 547 { 548 struct drm_i915_private *i915 = rps_to_i915(rps); 549 struct intel_uncore *uncore = rps_to_uncore(rps); 550 u8 fstart, vstart; 551 u32 rgvmodectl; 552 553 spin_lock_irq(&mchdev_lock); 554 555 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 556 557 /* Enable temp reporting */ 558 intel_uncore_write16(uncore, PMMISC, 559 intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); 560 intel_uncore_write16(uncore, TSC1, 561 intel_uncore_read16(uncore, TSC1) | TSE); 562 563 /* 100ms RC evaluation intervals */ 564 intel_uncore_write(uncore, RCUPEI, 100000); 565 intel_uncore_write(uncore, RCDNEI, 100000); 566 567 /* Set max/min thresholds to 90ms and 80ms respectively */ 568 intel_uncore_write(uncore, RCBMAXAVG, 90000); 569 intel_uncore_write(uncore, RCBMINAVG, 80000); 570 571 intel_uncore_write(uncore, MEMIHYST, 1); 572 573 /* Set up min, max, and cur for interrupt handling */ 574 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 575 MEMMODE_FSTART_SHIFT; 576 577 vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & 578 PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 579 580 intel_uncore_write(uncore, 581 MEMINTREN, 582 MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 583 584 intel_uncore_write(uncore, VIDSTART, vstart); 585 intel_uncore_posting_read(uncore, VIDSTART); 586 587 rgvmodectl |= MEMMODE_SWMODE_EN; 588 intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); 589 590 if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & 591 MEMCTL_CMD_STS) == 0, 10)) 592 drm_err(&uncore->i915->drm, 593 "stuck trying to change perf mode\n"); 594 mdelay(1); 595 596 __gen5_rps_set(rps, rps->cur_freq); 597 598 rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); 599 rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); 600 rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); 601 rps->ips.last_time1 = jiffies_to_msecs(jiffies); 602 603 rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); 604 rps->ips.last_time2 = ktime_get_raw_ns(); 605 606 spin_lock(&i915->irq_lock); 607 ilk_enable_display_irq(i915, DE_PCU_EVENT); 608 spin_unlock(&i915->irq_lock); 609 610 spin_unlock_irq(&mchdev_lock); 611 612 rps->ips.corr = init_emon(uncore); 613 614 return true; 615 } 616 617 static void gen5_rps_disable(struct intel_rps *rps) 618 { 619 struct drm_i915_private *i915 = rps_to_i915(rps); 620 struct intel_uncore *uncore = rps_to_uncore(rps); 621 u16 rgvswctl; 622 623 spin_lock_irq(&mchdev_lock); 624 625 spin_lock(&i915->irq_lock); 626 ilk_disable_display_irq(i915, DE_PCU_EVENT); 627 spin_unlock(&i915->irq_lock); 628 629 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 630 631 /* Ack interrupts, disable EFC interrupt */ 632 intel_uncore_rmw(uncore, MEMINTREN, MEMINT_EVAL_CHG_EN, 0); 633 intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 634 635 /* Go back to the starting frequency */ 636 __gen5_rps_set(rps, rps->idle_freq); 637 mdelay(1); 638 rgvswctl |= MEMCTL_CMD_STS; 639 intel_uncore_write(uncore, MEMSWCTL, rgvswctl); 640 mdelay(1); 641 642 spin_unlock_irq(&mchdev_lock); 643 } 644 645 static u32 rps_limits(struct intel_rps *rps, u8 val) 646 { 647 u32 limits; 648 649 /* 650 * Only set the down limit when we've reached the 
lowest level to avoid 651 * getting more interrupts, otherwise leave this clear. This prevents a 652 * race in the hw when coming out of rc6: There's a tiny window where 653 * the hw runs at the minimal clock before selecting the desired 654 * frequency, if the down threshold expires in that window we will not 655 * receive a down interrupt. 656 */ 657 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { 658 limits = rps->max_freq_softlimit << 23; 659 if (val <= rps->min_freq_softlimit) 660 limits |= rps->min_freq_softlimit << 14; 661 } else { 662 limits = rps->max_freq_softlimit << 24; 663 if (val <= rps->min_freq_softlimit) 664 limits |= rps->min_freq_softlimit << 16; 665 } 666 667 return limits; 668 } 669 670 static void rps_set_power(struct intel_rps *rps, int new_power) 671 { 672 struct intel_gt *gt = rps_to_gt(rps); 673 struct intel_uncore *uncore = gt->uncore; 674 u32 ei_up = 0, ei_down = 0; 675 676 lockdep_assert_held(&rps->power.mutex); 677 678 if (new_power == rps->power.mode) 679 return; 680 681 /* Note the units here are not exactly 1us, but 1280ns. */ 682 switch (new_power) { 683 case LOW_POWER: 684 ei_up = 16000; 685 ei_down = 32000; 686 break; 687 688 case BETWEEN: 689 ei_up = 13000; 690 ei_down = 32000; 691 break; 692 693 case HIGH_POWER: 694 ei_up = 10000; 695 ei_down = 32000; 696 break; 697 } 698 699 /* When byt can survive without system hang with dynamic 700 * sw freq adjustments, this restriction can be lifted. 701 */ 702 if (IS_VALLEYVIEW(gt->i915)) 703 goto skip_hw_write; 704 705 GT_TRACE(gt, 706 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", 707 new_power, 708 rps->power.up_threshold, ei_up, 709 rps->power.down_threshold, ei_down); 710 711 set(uncore, GEN6_RP_UP_EI, 712 intel_gt_ns_to_pm_interval(gt, ei_up * 1000)); 713 set(uncore, GEN6_RP_UP_THRESHOLD, 714 intel_gt_ns_to_pm_interval(gt, 715 ei_up * rps->power.up_threshold * 10)); 716 717 set(uncore, GEN6_RP_DOWN_EI, 718 intel_gt_ns_to_pm_interval(gt, ei_down * 1000)); 719 set(uncore, GEN6_RP_DOWN_THRESHOLD, 720 intel_gt_ns_to_pm_interval(gt, 721 ei_down * 722 rps->power.down_threshold * 10)); 723 724 set(uncore, GEN6_RP_CONTROL, 725 (GRAPHICS_VER(gt->i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | 726 GEN6_RP_MEDIA_HW_NORMAL_MODE | 727 GEN6_RP_MEDIA_IS_GFX | 728 GEN6_RP_ENABLE | 729 GEN6_RP_UP_BUSY_AVG | 730 GEN6_RP_DOWN_IDLE_AVG); 731 732 skip_hw_write: 733 rps->power.mode = new_power; 734 } 735 736 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) 737 { 738 int new_power; 739 740 new_power = rps->power.mode; 741 switch (rps->power.mode) { 742 case LOW_POWER: 743 if (val > rps->efficient_freq + 1 && 744 val > rps->cur_freq) 745 new_power = BETWEEN; 746 break; 747 748 case BETWEEN: 749 if (val <= rps->efficient_freq && 750 val < rps->cur_freq) 751 new_power = LOW_POWER; 752 else if (val >= rps->rp0_freq && 753 val > rps->cur_freq) 754 new_power = HIGH_POWER; 755 break; 756 757 case HIGH_POWER: 758 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && 759 val < rps->cur_freq) 760 new_power = BETWEEN; 761 break; 762 } 763 /* Max/min bins are special */ 764 if (val <= rps->min_freq_softlimit) 765 new_power = LOW_POWER; 766 if (val >= rps->max_freq_softlimit) 767 new_power = HIGH_POWER; 768 769 mutex_lock(&rps->power.mutex); 770 if (rps->power.interactive) 771 new_power = HIGH_POWER; 772 rps_set_power(rps, new_power); 773 mutex_unlock(&rps->power.mutex); 774 } 775 776 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) 777 { 778 GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", 779 str_yes_no(interactive)); 780 781 mutex_lock(&rps->power.mutex); 782 if (interactive) { 783 if (!rps->power.interactive++ && intel_rps_is_active(rps)) 784 rps_set_power(rps, HIGH_POWER); 785 } else { 786 GEM_BUG_ON(!rps->power.interactive); 787 rps->power.interactive--; 788 } 789 mutex_unlock(&rps->power.mutex); 790 } 791 792 static int gen6_rps_set(struct intel_rps *rps, u8 val) 793 { 794 struct intel_uncore *uncore = rps_to_uncore(rps); 795 struct drm_i915_private *i915 = rps_to_i915(rps); 796 u32 swreq; 797 798 GEM_BUG_ON(rps_uses_slpc(rps)); 799 800 if (GRAPHICS_VER(i915) >= 9) 801 swreq = GEN9_FREQUENCY(val); 802 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 803 swreq = HSW_FREQUENCY(val); 804 else 805 swreq = (GEN6_FREQUENCY(val) | 806 GEN6_OFFSET(0) | 807 GEN6_AGGRESSIVE_TURBO); 808 set(uncore, GEN6_RPNSWREQ, swreq); 809 810 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n", 811 val, intel_gpu_freq(rps, val), swreq); 812 813 return 0; 814 } 815 816 static int vlv_rps_set(struct intel_rps *rps, u8 val) 817 { 818 struct drm_i915_private *i915 = rps_to_i915(rps); 819 int err; 820 821 vlv_punit_get(i915); 822 err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); 823 vlv_punit_put(i915); 824 825 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n", 826 val, intel_gpu_freq(rps, val)); 827 828 return err; 829 } 830 831 static int rps_set(struct intel_rps *rps, u8 val, bool update) 832 { 833 struct drm_i915_private *i915 = rps_to_i915(rps); 834 int err; 835 836 if (val == rps->last_freq) 837 return 0; 838 839 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 840 err = vlv_rps_set(rps, val); 841 else if (GRAPHICS_VER(i915) >= 6) 842 err = gen6_rps_set(rps, val); 843 else 844 err = gen5_rps_set(rps, val); 845 if (err) 846 return err; 847 848 if (update && GRAPHICS_VER(i915) >= 6) 849 gen6_rps_set_thresholds(rps, val); 850 rps->last_freq = val; 851 852 return 0; 853 } 854 855 void intel_rps_unpark(struct intel_rps *rps) 856 { 857 if (!intel_rps_is_enabled(rps)) 858 return; 859 860 GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); 861 862 /* 863 * Use the user's desired frequency as a guide, but for better 864 * performance, jump 
directly to RPe as our starting frequency. 865 */ 866 mutex_lock(&rps->lock); 867 868 intel_rps_set_active(rps); 869 intel_rps_set(rps, 870 clamp(rps->cur_freq, 871 rps->min_freq_softlimit, 872 rps->max_freq_softlimit)); 873 874 mutex_unlock(&rps->lock); 875 876 rps->pm_iir = 0; 877 if (intel_rps_has_interrupts(rps)) 878 rps_enable_interrupts(rps); 879 if (intel_rps_uses_timer(rps)) 880 rps_start_timer(rps); 881 882 if (GRAPHICS_VER(rps_to_i915(rps)) == 5) 883 gen5_rps_update(rps); 884 } 885 886 void intel_rps_park(struct intel_rps *rps) 887 { 888 int adj; 889 890 if (!intel_rps_is_enabled(rps)) 891 return; 892 893 if (!intel_rps_clear_active(rps)) 894 return; 895 896 if (intel_rps_uses_timer(rps)) 897 rps_stop_timer(rps); 898 if (intel_rps_has_interrupts(rps)) 899 rps_disable_interrupts(rps); 900 901 if (rps->last_freq <= rps->idle_freq) 902 return; 903 904 /* 905 * The punit delays the write of the frequency and voltage until it 906 * determines the GPU is awake. During normal usage we don't want to 907 * waste power changing the frequency if the GPU is sleeping (rc6). 908 * However, the GPU and driver is now idle and we do not want to delay 909 * switching to minimum voltage (reducing power whilst idle) as we do 910 * not expect to be woken in the near future and so must flush the 911 * change by waking the device. 912 * 913 * We choose to take the media powerwell (either would do to trick the 914 * punit into committing the voltage change) as that takes a lot less 915 * power than the render powerwell. 916 */ 917 intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); 918 rps_set(rps, rps->idle_freq, false); 919 intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); 920 921 /* 922 * Since we will try and restart from the previously requested 923 * frequency on unparking, treat this idle point as a downclock 924 * interrupt and reduce the frequency for resume. If we park/unpark 925 * more frequently than the rps worker can run, we will not respond 926 * to any EI and never see a change in frequency. 927 * 928 * (Note we accommodate Cherryview's limitation of only using an 929 * even bin by applying it to all.) 
930 */ 931 adj = rps->last_adj; 932 if (adj < 0) 933 adj *= 2; 934 else /* CHV needs even encode values */ 935 adj = -2; 936 rps->last_adj = adj; 937 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); 938 if (rps->cur_freq < rps->efficient_freq) { 939 rps->cur_freq = rps->efficient_freq; 940 rps->last_adj = 0; 941 } 942 943 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); 944 } 945 946 u32 intel_rps_get_boost_frequency(struct intel_rps *rps) 947 { 948 struct intel_guc_slpc *slpc; 949 950 if (rps_uses_slpc(rps)) { 951 slpc = rps_to_slpc(rps); 952 953 return slpc->boost_freq; 954 } else { 955 return intel_gpu_freq(rps, rps->boost_freq); 956 } 957 } 958 959 static int rps_set_boost_freq(struct intel_rps *rps, u32 val) 960 { 961 bool boost = false; 962 963 /* Validate against (static) hardware limits */ 964 val = intel_freq_opcode(rps, val); 965 if (val < rps->min_freq || val > rps->max_freq) 966 return -EINVAL; 967 968 mutex_lock(&rps->lock); 969 if (val != rps->boost_freq) { 970 rps->boost_freq = val; 971 boost = atomic_read(&rps->num_waiters); 972 } 973 mutex_unlock(&rps->lock); 974 if (boost) 975 schedule_work(&rps->work); 976 977 return 0; 978 } 979 980 int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) 981 { 982 struct intel_guc_slpc *slpc; 983 984 if (rps_uses_slpc(rps)) { 985 slpc = rps_to_slpc(rps); 986 987 return intel_guc_slpc_set_boost_freq(slpc, freq); 988 } else { 989 return rps_set_boost_freq(rps, freq); 990 } 991 } 992 993 void intel_rps_dec_waiters(struct intel_rps *rps) 994 { 995 struct intel_guc_slpc *slpc; 996 997 if (rps_uses_slpc(rps)) { 998 slpc = rps_to_slpc(rps); 999 1000 intel_guc_slpc_dec_waiters(slpc); 1001 } else { 1002 atomic_dec(&rps->num_waiters); 1003 } 1004 } 1005 1006 void intel_rps_boost(struct i915_request *rq) 1007 { 1008 struct intel_guc_slpc *slpc; 1009 1010 if (i915_request_signaled(rq) || i915_request_has_waitboost(rq)) 1011 return; 1012 1013 /* Serializes with i915_request_retire() */ 1014 if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { 1015 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; 1016 1017 if (rps_uses_slpc(rps)) { 1018 slpc = rps_to_slpc(rps); 1019 1020 if (slpc->min_freq_softlimit >= slpc->boost_freq) 1021 return; 1022 1023 /* Return if old value is non zero */ 1024 if (!atomic_fetch_inc(&slpc->num_waiters)) { 1025 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", 1026 rq->fence.context, rq->fence.seqno); 1027 schedule_work(&slpc->boost_work); 1028 } 1029 1030 return; 1031 } 1032 1033 if (atomic_fetch_inc(&rps->num_waiters)) 1034 return; 1035 1036 if (!intel_rps_is_active(rps)) 1037 return; 1038 1039 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", 1040 rq->fence.context, rq->fence.seqno); 1041 1042 if (READ_ONCE(rps->cur_freq) < rps->boost_freq) 1043 schedule_work(&rps->work); 1044 1045 WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */ 1046 } 1047 } 1048 1049 int intel_rps_set(struct intel_rps *rps, u8 val) 1050 { 1051 int err; 1052 1053 lockdep_assert_held(&rps->lock); 1054 GEM_BUG_ON(val > rps->max_freq); 1055 GEM_BUG_ON(val < rps->min_freq); 1056 1057 if (intel_rps_is_active(rps)) { 1058 err = rps_set(rps, val, true); 1059 if (err) 1060 return err; 1061 1062 /* 1063 * Make sure we continue to get interrupts 1064 * until we hit the minimum or maximum frequencies. 
1065 */ 1066 if (intel_rps_has_interrupts(rps)) { 1067 struct intel_uncore *uncore = rps_to_uncore(rps); 1068 1069 set(uncore, 1070 GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val)); 1071 1072 set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val)); 1073 } 1074 } 1075 1076 rps->cur_freq = val; 1077 return 0; 1078 } 1079 1080 static u32 intel_rps_read_state_cap(struct intel_rps *rps) 1081 { 1082 struct drm_i915_private *i915 = rps_to_i915(rps); 1083 struct intel_uncore *uncore = rps_to_uncore(rps); 1084 1085 if (IS_PONTEVECCHIO(i915)) 1086 return intel_uncore_read(uncore, PVC_RP_STATE_CAP); 1087 else if (IS_XEHPSDV(i915)) 1088 return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); 1089 else if (IS_GEN9_LP(i915)) 1090 return intel_uncore_read(uncore, BXT_RP_STATE_CAP); 1091 else 1092 return intel_uncore_read(uncore, GEN6_RP_STATE_CAP); 1093 } 1094 1095 static void 1096 mtl_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) 1097 { 1098 struct intel_uncore *uncore = rps_to_uncore(rps); 1099 u32 rp_state_cap = rps_to_gt(rps)->type == GT_MEDIA ? 1100 intel_uncore_read(uncore, MTL_MEDIAP_STATE_CAP) : 1101 intel_uncore_read(uncore, MTL_RP_STATE_CAP); 1102 u32 rpe = rps_to_gt(rps)->type == GT_MEDIA ? 1103 intel_uncore_read(uncore, MTL_MPE_FREQUENCY) : 1104 intel_uncore_read(uncore, MTL_GT_RPE_FREQUENCY); 1105 1106 /* MTL values are in units of 16.67 MHz */ 1107 caps->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, rp_state_cap); 1108 caps->min_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, rp_state_cap); 1109 caps->rp1_freq = REG_FIELD_GET(MTL_RPE_MASK, rpe); 1110 } 1111 1112 static void 1113 __gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) 1114 { 1115 struct drm_i915_private *i915 = rps_to_i915(rps); 1116 u32 rp_state_cap; 1117 1118 rp_state_cap = intel_rps_read_state_cap(rps); 1119 1120 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 1121 if (IS_GEN9_LP(i915)) { 1122 caps->rp0_freq = (rp_state_cap >> 16) & 0xff; 1123 caps->rp1_freq = (rp_state_cap >> 8) & 0xff; 1124 caps->min_freq = (rp_state_cap >> 0) & 0xff; 1125 } else { 1126 caps->rp0_freq = (rp_state_cap >> 0) & 0xff; 1127 if (GRAPHICS_VER(i915) >= 10) 1128 caps->rp1_freq = REG_FIELD_GET(RPE_MASK, 1129 intel_uncore_read(to_gt(i915)->uncore, 1130 GEN10_FREQ_INFO_REC)); 1131 else 1132 caps->rp1_freq = (rp_state_cap >> 8) & 0xff; 1133 caps->min_freq = (rp_state_cap >> 16) & 0xff; 1134 } 1135 1136 if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { 1137 /* 1138 * In this case rp_state_cap register reports frequencies in 1139 * units of 50 MHz. Convert these to the actual "hw unit", i.e. 
1140 * units of 16.67 MHz 1141 */ 1142 caps->rp0_freq *= GEN9_FREQ_SCALER; 1143 caps->rp1_freq *= GEN9_FREQ_SCALER; 1144 caps->min_freq *= GEN9_FREQ_SCALER; 1145 } 1146 } 1147 1148 /** 1149 * gen6_rps_get_freq_caps - Get freq caps exposed by HW 1150 * @rps: the intel_rps structure 1151 * @caps: returned freq caps 1152 * 1153 * Returned "caps" frequencies should be converted to MHz using 1154 * intel_gpu_freq() 1155 */ 1156 void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps) 1157 { 1158 struct drm_i915_private *i915 = rps_to_i915(rps); 1159 1160 if (IS_METEORLAKE(i915)) 1161 return mtl_get_freq_caps(rps, caps); 1162 else 1163 return __gen6_rps_get_freq_caps(rps, caps); 1164 } 1165 1166 static void gen6_rps_init(struct intel_rps *rps) 1167 { 1168 struct drm_i915_private *i915 = rps_to_i915(rps); 1169 struct intel_rps_freq_caps caps; 1170 1171 gen6_rps_get_freq_caps(rps, &caps); 1172 rps->rp0_freq = caps.rp0_freq; 1173 rps->rp1_freq = caps.rp1_freq; 1174 rps->min_freq = caps.min_freq; 1175 1176 /* hw_max = RP0 until we check for overclocking */ 1177 rps->max_freq = rps->rp0_freq; 1178 1179 rps->efficient_freq = rps->rp1_freq; 1180 if (IS_HASWELL(i915) || IS_BROADWELL(i915) || 1181 IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { 1182 u32 ddcc_status = 0; 1183 u32 mult = 1; 1184 1185 if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) 1186 mult = GEN9_FREQ_SCALER; 1187 if (snb_pcode_read(rps_to_gt(rps)->uncore, 1188 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 1189 &ddcc_status, NULL) == 0) 1190 rps->efficient_freq = 1191 clamp_t(u32, 1192 ((ddcc_status >> 8) & 0xff) * mult, 1193 rps->min_freq, 1194 rps->max_freq); 1195 } 1196 } 1197 1198 static bool rps_reset(struct intel_rps *rps) 1199 { 1200 struct drm_i915_private *i915 = rps_to_i915(rps); 1201 1202 /* force a reset */ 1203 rps->power.mode = -1; 1204 rps->last_freq = -1; 1205 1206 if (rps_set(rps, rps->min_freq, true)) { 1207 drm_err(&i915->drm, "Failed to reset RPS to initial values\n"); 1208 return false; 1209 } 1210 1211 rps->cur_freq = rps->min_freq; 1212 return true; 1213 } 1214 1215 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 1216 static bool gen9_rps_enable(struct intel_rps *rps) 1217 { 1218 struct intel_gt *gt = rps_to_gt(rps); 1219 struct intel_uncore *uncore = gt->uncore; 1220 1221 /* Program defaults and thresholds for RPS */ 1222 if (GRAPHICS_VER(gt->i915) == 9) 1223 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1224 GEN9_FREQUENCY(rps->rp1_freq)); 1225 1226 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); 1227 1228 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1229 1230 return rps_reset(rps); 1231 } 1232 1233 static bool gen8_rps_enable(struct intel_rps *rps) 1234 { 1235 struct intel_uncore *uncore = rps_to_uncore(rps); 1236 1237 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1238 HSW_FREQUENCY(rps->rp1_freq)); 1239 1240 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1241 1242 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1243 1244 return rps_reset(rps); 1245 } 1246 1247 static bool gen6_rps_enable(struct intel_rps *rps) 1248 { 1249 struct intel_uncore *uncore = rps_to_uncore(rps); 1250 1251 /* Power down if completely idle for over 50ms */ 1252 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); 1253 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1254 1255 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1256 GEN6_PM_RP_DOWN_THRESHOLD | 1257 GEN6_PM_RP_DOWN_TIMEOUT); 1258 1259 return 
rps_reset(rps); 1260 } 1261 1262 static int chv_rps_max_freq(struct intel_rps *rps) 1263 { 1264 struct drm_i915_private *i915 = rps_to_i915(rps); 1265 struct intel_gt *gt = rps_to_gt(rps); 1266 u32 val; 1267 1268 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1269 1270 switch (gt->info.sseu.eu_total) { 1271 case 8: 1272 /* (2 * 4) config */ 1273 val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; 1274 break; 1275 case 12: 1276 /* (2 * 6) config */ 1277 val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; 1278 break; 1279 case 16: 1280 /* (2 * 8) config */ 1281 default: 1282 /* Setting (2 * 8) Min RP0 for any other combination */ 1283 val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; 1284 break; 1285 } 1286 1287 return val & FB_GFX_FREQ_FUSE_MASK; 1288 } 1289 1290 static int chv_rps_rpe_freq(struct intel_rps *rps) 1291 { 1292 struct drm_i915_private *i915 = rps_to_i915(rps); 1293 u32 val; 1294 1295 val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); 1296 val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; 1297 1298 return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 1299 } 1300 1301 static int chv_rps_guar_freq(struct intel_rps *rps) 1302 { 1303 struct drm_i915_private *i915 = rps_to_i915(rps); 1304 u32 val; 1305 1306 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1307 1308 return val & FB_GFX_FREQ_FUSE_MASK; 1309 } 1310 1311 static u32 chv_rps_min_freq(struct intel_rps *rps) 1312 { 1313 struct drm_i915_private *i915 = rps_to_i915(rps); 1314 u32 val; 1315 1316 val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); 1317 val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; 1318 1319 return val & FB_GFX_FREQ_FUSE_MASK; 1320 } 1321 1322 static bool chv_rps_enable(struct intel_rps *rps) 1323 { 1324 struct intel_uncore *uncore = rps_to_uncore(rps); 1325 struct drm_i915_private *i915 = rps_to_i915(rps); 1326 u32 val; 1327 1328 /* 1: Program defaults and thresholds for RPS*/ 1329 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1330 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1331 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1332 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1333 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1334 1335 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1336 1337 /* 2: Enable RPS */ 1338 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1339 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1340 GEN6_RP_MEDIA_IS_GFX | 1341 GEN6_RP_ENABLE | 1342 GEN6_RP_UP_BUSY_AVG | 1343 GEN6_RP_DOWN_IDLE_AVG); 1344 1345 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1346 GEN6_PM_RP_DOWN_THRESHOLD | 1347 GEN6_PM_RP_DOWN_TIMEOUT); 1348 1349 /* Setting Fixed Bias */ 1350 vlv_punit_get(i915); 1351 1352 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; 1353 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1354 1355 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1356 1357 vlv_punit_put(i915); 1358 1359 /* RPS code assumes GPLL is used */ 1360 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1361 "GPLL not enabled\n"); 1362 1363 drm_dbg(&i915->drm, "GPLL enabled? 
%s\n", 1364 str_yes_no(val & GPLLENABLE)); 1365 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1366 1367 return rps_reset(rps); 1368 } 1369 1370 static int vlv_rps_guar_freq(struct intel_rps *rps) 1371 { 1372 struct drm_i915_private *i915 = rps_to_i915(rps); 1373 u32 val, rp1; 1374 1375 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1376 1377 rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; 1378 rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 1379 1380 return rp1; 1381 } 1382 1383 static int vlv_rps_max_freq(struct intel_rps *rps) 1384 { 1385 struct drm_i915_private *i915 = rps_to_i915(rps); 1386 u32 val, rp0; 1387 1388 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1389 1390 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 1391 /* Clamp to max */ 1392 rp0 = min_t(u32, rp0, 0xea); 1393 1394 return rp0; 1395 } 1396 1397 static int vlv_rps_rpe_freq(struct intel_rps *rps) 1398 { 1399 struct drm_i915_private *i915 = rps_to_i915(rps); 1400 u32 val, rpe; 1401 1402 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 1403 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 1404 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 1405 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 1406 1407 return rpe; 1408 } 1409 1410 static int vlv_rps_min_freq(struct intel_rps *rps) 1411 { 1412 struct drm_i915_private *i915 = rps_to_i915(rps); 1413 u32 val; 1414 1415 val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; 1416 /* 1417 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 1418 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 1419 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 1420 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 1421 * to make sure it matches what Punit accepts. 1422 */ 1423 return max_t(u32, val, 0xc0); 1424 } 1425 1426 static bool vlv_rps_enable(struct intel_rps *rps) 1427 { 1428 struct intel_uncore *uncore = rps_to_uncore(rps); 1429 struct drm_i915_private *i915 = rps_to_i915(rps); 1430 u32 val; 1431 1432 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1433 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1434 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1435 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1436 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1437 1438 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1439 1440 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1441 GEN6_RP_MEDIA_TURBO | 1442 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1443 GEN6_RP_MEDIA_IS_GFX | 1444 GEN6_RP_ENABLE | 1445 GEN6_RP_UP_BUSY_AVG | 1446 GEN6_RP_DOWN_IDLE_CONT); 1447 1448 /* WaGsvRC0ResidencyMethod:vlv */ 1449 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; 1450 1451 vlv_punit_get(i915); 1452 1453 /* Setting Fixed Bias */ 1454 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; 1455 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1456 1457 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1458 1459 vlv_punit_put(i915); 1460 1461 /* RPS code assumes GPLL is used */ 1462 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1463 "GPLL not enabled\n"); 1464 1465 drm_dbg(&i915->drm, "GPLL enabled? 
%s\n", 1466 str_yes_no(val & GPLLENABLE)); 1467 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1468 1469 return rps_reset(rps); 1470 } 1471 1472 static unsigned long __ips_gfx_val(struct intel_ips *ips) 1473 { 1474 struct intel_rps *rps = container_of(ips, typeof(*rps), ips); 1475 struct intel_uncore *uncore = rps_to_uncore(rps); 1476 unsigned int t, state1, state2; 1477 u32 pxvid, ext_v; 1478 u64 corr, corr2; 1479 1480 lockdep_assert_held(&mchdev_lock); 1481 1482 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); 1483 pxvid = (pxvid >> 24) & 0x7f; 1484 ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); 1485 1486 state1 = ext_v; 1487 1488 /* Revel in the empirically derived constants */ 1489 1490 /* Correction factor in 1/100000 units */ 1491 t = ips_mch_val(uncore); 1492 if (t > 80) 1493 corr = t * 2349 + 135940; 1494 else if (t >= 50) 1495 corr = t * 964 + 29317; 1496 else /* < 50 */ 1497 corr = t * 301 + 1004; 1498 1499 corr = div_u64(corr * 150142 * state1, 10000) - 78642; 1500 corr2 = div_u64(corr, 100000) * ips->corr; 1501 1502 state2 = div_u64(corr2 * state1, 10000); 1503 state2 /= 100; /* convert to mW */ 1504 1505 __gen5_ips_update(ips); 1506 1507 return ips->gfx_power + state2; 1508 } 1509 1510 static bool has_busy_stats(struct intel_rps *rps) 1511 { 1512 struct intel_engine_cs *engine; 1513 enum intel_engine_id id; 1514 1515 for_each_engine(engine, rps_to_gt(rps), id) { 1516 if (!intel_engine_supports_stats(engine)) 1517 return false; 1518 } 1519 1520 return true; 1521 } 1522 1523 void intel_rps_enable(struct intel_rps *rps) 1524 { 1525 struct drm_i915_private *i915 = rps_to_i915(rps); 1526 struct intel_uncore *uncore = rps_to_uncore(rps); 1527 bool enabled = false; 1528 1529 if (!HAS_RPS(i915)) 1530 return; 1531 1532 if (rps_uses_slpc(rps)) 1533 return; 1534 1535 intel_gt_check_clock_frequency(rps_to_gt(rps)); 1536 1537 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1538 if (rps->max_freq <= rps->min_freq) 1539 /* leave disabled, no room for dynamic reclocking */; 1540 else if (IS_CHERRYVIEW(i915)) 1541 enabled = chv_rps_enable(rps); 1542 else if (IS_VALLEYVIEW(i915)) 1543 enabled = vlv_rps_enable(rps); 1544 else if (GRAPHICS_VER(i915) >= 9) 1545 enabled = gen9_rps_enable(rps); 1546 else if (GRAPHICS_VER(i915) >= 8) 1547 enabled = gen8_rps_enable(rps); 1548 else if (GRAPHICS_VER(i915) >= 6) 1549 enabled = gen6_rps_enable(rps); 1550 else if (IS_IRONLAKE_M(i915)) 1551 enabled = gen5_rps_enable(rps); 1552 else 1553 MISSING_CASE(GRAPHICS_VER(i915)); 1554 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 1555 if (!enabled) 1556 return; 1557 1558 GT_TRACE(rps_to_gt(rps), 1559 "min:%x, max:%x, freq:[%d, %d], thresholds:[%u, %u]\n", 1560 rps->min_freq, rps->max_freq, 1561 intel_gpu_freq(rps, rps->min_freq), 1562 intel_gpu_freq(rps, rps->max_freq), 1563 rps->power.up_threshold, 1564 rps->power.down_threshold); 1565 1566 GEM_BUG_ON(rps->max_freq < rps->min_freq); 1567 GEM_BUG_ON(rps->idle_freq > rps->max_freq); 1568 1569 GEM_BUG_ON(rps->efficient_freq < rps->min_freq); 1570 GEM_BUG_ON(rps->efficient_freq > rps->max_freq); 1571 1572 if (has_busy_stats(rps)) 1573 intel_rps_set_timer(rps); 1574 else if (GRAPHICS_VER(i915) >= 6 && GRAPHICS_VER(i915) <= 11) 1575 intel_rps_set_interrupts(rps); 1576 else 1577 /* Ironlake currently uses intel_ips.ko */ {} 1578 1579 intel_rps_set_enabled(rps); 1580 } 1581 1582 static void gen6_rps_disable(struct intel_rps *rps) 1583 { 1584 set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); 1585 } 1586 1587 void intel_rps_disable(struct intel_rps *rps) 
1588 { 1589 struct drm_i915_private *i915 = rps_to_i915(rps); 1590 1591 if (!intel_rps_is_enabled(rps)) 1592 return; 1593 1594 intel_rps_clear_enabled(rps); 1595 intel_rps_clear_interrupts(rps); 1596 intel_rps_clear_timer(rps); 1597 1598 if (GRAPHICS_VER(i915) >= 6) 1599 gen6_rps_disable(rps); 1600 else if (IS_IRONLAKE_M(i915)) 1601 gen5_rps_disable(rps); 1602 } 1603 1604 static int byt_gpu_freq(struct intel_rps *rps, int val) 1605 { 1606 /* 1607 * N = val - 0xb7 1608 * Slow = Fast = GPLL ref * N 1609 */ 1610 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); 1611 } 1612 1613 static int byt_freq_opcode(struct intel_rps *rps, int val) 1614 { 1615 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; 1616 } 1617 1618 static int chv_gpu_freq(struct intel_rps *rps, int val) 1619 { 1620 /* 1621 * N = val / 2 1622 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 1623 */ 1624 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); 1625 } 1626 1627 static int chv_freq_opcode(struct intel_rps *rps, int val) 1628 { 1629 /* CHV needs even values */ 1630 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; 1631 } 1632 1633 int intel_gpu_freq(struct intel_rps *rps, int val) 1634 { 1635 struct drm_i915_private *i915 = rps_to_i915(rps); 1636 1637 if (GRAPHICS_VER(i915) >= 9) 1638 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 1639 GEN9_FREQ_SCALER); 1640 else if (IS_CHERRYVIEW(i915)) 1641 return chv_gpu_freq(rps, val); 1642 else if (IS_VALLEYVIEW(i915)) 1643 return byt_gpu_freq(rps, val); 1644 else if (GRAPHICS_VER(i915) >= 6) 1645 return val * GT_FREQUENCY_MULTIPLIER; 1646 else 1647 return val; 1648 } 1649 1650 int intel_freq_opcode(struct intel_rps *rps, int val) 1651 { 1652 struct drm_i915_private *i915 = rps_to_i915(rps); 1653 1654 if (GRAPHICS_VER(i915) >= 9) 1655 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 1656 GT_FREQUENCY_MULTIPLIER); 1657 else if (IS_CHERRYVIEW(i915)) 1658 return chv_freq_opcode(rps, val); 1659 else if (IS_VALLEYVIEW(i915)) 1660 return byt_freq_opcode(rps, val); 1661 else if (GRAPHICS_VER(i915) >= 6) 1662 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 1663 else 1664 return val; 1665 } 1666 1667 static void vlv_init_gpll_ref_freq(struct intel_rps *rps) 1668 { 1669 struct drm_i915_private *i915 = rps_to_i915(rps); 1670 1671 rps->gpll_ref_freq = 1672 vlv_get_cck_clock(i915, "GPLL ref", 1673 CCK_GPLL_CLOCK_CONTROL, 1674 i915->czclk_freq); 1675 1676 drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", 1677 rps->gpll_ref_freq); 1678 } 1679 1680 static void vlv_rps_init(struct intel_rps *rps) 1681 { 1682 struct drm_i915_private *i915 = rps_to_i915(rps); 1683 1684 vlv_iosf_sb_get(i915, 1685 BIT(VLV_IOSF_SB_PUNIT) | 1686 BIT(VLV_IOSF_SB_NC) | 1687 BIT(VLV_IOSF_SB_CCK)); 1688 1689 vlv_init_gpll_ref_freq(rps); 1690 1691 rps->max_freq = vlv_rps_max_freq(rps); 1692 rps->rp0_freq = rps->max_freq; 1693 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1694 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1695 1696 rps->efficient_freq = vlv_rps_rpe_freq(rps); 1697 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1698 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1699 1700 rps->rp1_freq = vlv_rps_guar_freq(rps); 1701 drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 1702 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1703 1704 rps->min_freq = vlv_rps_min_freq(rps); 1705 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1706 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1707 
1708 vlv_iosf_sb_put(i915, 1709 BIT(VLV_IOSF_SB_PUNIT) | 1710 BIT(VLV_IOSF_SB_NC) | 1711 BIT(VLV_IOSF_SB_CCK)); 1712 } 1713 1714 static void chv_rps_init(struct intel_rps *rps) 1715 { 1716 struct drm_i915_private *i915 = rps_to_i915(rps); 1717 1718 vlv_iosf_sb_get(i915, 1719 BIT(VLV_IOSF_SB_PUNIT) | 1720 BIT(VLV_IOSF_SB_NC) | 1721 BIT(VLV_IOSF_SB_CCK)); 1722 1723 vlv_init_gpll_ref_freq(rps); 1724 1725 rps->max_freq = chv_rps_max_freq(rps); 1726 rps->rp0_freq = rps->max_freq; 1727 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1728 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1729 1730 rps->efficient_freq = chv_rps_rpe_freq(rps); 1731 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1732 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1733 1734 rps->rp1_freq = chv_rps_guar_freq(rps); 1735 drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n", 1736 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1737 1738 rps->min_freq = chv_rps_min_freq(rps); 1739 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1740 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1741 1742 vlv_iosf_sb_put(i915, 1743 BIT(VLV_IOSF_SB_PUNIT) | 1744 BIT(VLV_IOSF_SB_NC) | 1745 BIT(VLV_IOSF_SB_CCK)); 1746 1747 drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq | 1748 rps->rp1_freq | rps->min_freq) & 1, 1749 "Odd GPU freq values\n"); 1750 } 1751 1752 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) 1753 { 1754 ei->ktime = ktime_get_raw(); 1755 ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); 1756 ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); 1757 } 1758 1759 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) 1760 { 1761 struct intel_uncore *uncore = rps_to_uncore(rps); 1762 const struct intel_rps_ei *prev = &rps->ei; 1763 struct intel_rps_ei now; 1764 u32 events = 0; 1765 1766 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1767 return 0; 1768 1769 vlv_c0_read(uncore, &now); 1770 1771 if (prev->ktime) { 1772 u64 time, c0; 1773 u32 render, media; 1774 1775 time = ktime_us_delta(now.ktime, prev->ktime); 1776 1777 time *= rps_to_i915(rps)->czclk_freq; 1778 1779 /* Workload can be split between render + media, 1780 * e.g. SwapBuffers being blitted in X after being rendered in 1781 * mesa. To account for this we need to combine both engines 1782 * into our activity counter. 1783 */ 1784 render = now.render_c0 - prev->render_c0; 1785 media = now.media_c0 - prev->media_c0; 1786 c0 = max(render, media); 1787 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1788 1789 if (c0 > time * rps->power.up_threshold) 1790 events = GEN6_PM_RP_UP_THRESHOLD; 1791 else if (c0 < time * rps->power.down_threshold) 1792 events = GEN6_PM_RP_DOWN_THRESHOLD; 1793 } 1794 1795 rps->ei = now; 1796 return events; 1797 } 1798 1799 static void rps_work(struct work_struct *work) 1800 { 1801 struct intel_rps *rps = container_of(work, typeof(*rps), work); 1802 struct intel_gt *gt = rps_to_gt(rps); 1803 struct drm_i915_private *i915 = rps_to_i915(rps); 1804 bool client_boost = false; 1805 int new_freq, adj, min, max; 1806 u32 pm_iir = 0; 1807 1808 spin_lock_irq(gt->irq_lock); 1809 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; 1810 client_boost = atomic_read(&rps->num_waiters); 1811 spin_unlock_irq(gt->irq_lock); 1812 1813 /* Make sure we didn't queue anything we're not going to process. 
*/ 1814 if (!pm_iir && !client_boost) 1815 goto out; 1816 1817 mutex_lock(&rps->lock); 1818 if (!intel_rps_is_active(rps)) { 1819 mutex_unlock(&rps->lock); 1820 return; 1821 } 1822 1823 pm_iir |= vlv_wa_c0_ei(rps, pm_iir); 1824 1825 adj = rps->last_adj; 1826 new_freq = rps->cur_freq; 1827 min = rps->min_freq_softlimit; 1828 max = rps->max_freq_softlimit; 1829 if (client_boost) 1830 max = rps->max_freq; 1831 1832 GT_TRACE(gt, 1833 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", 1834 pm_iir, str_yes_no(client_boost), 1835 adj, new_freq, min, max); 1836 1837 if (client_boost && new_freq < rps->boost_freq) { 1838 new_freq = rps->boost_freq; 1839 adj = 0; 1840 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1841 if (adj > 0) 1842 adj *= 2; 1843 else /* CHV needs even encode values */ 1844 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; 1845 1846 if (new_freq >= rps->max_freq_softlimit) 1847 adj = 0; 1848 } else if (client_boost) { 1849 adj = 0; 1850 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1851 if (rps->cur_freq > rps->efficient_freq) 1852 new_freq = rps->efficient_freq; 1853 else if (rps->cur_freq > rps->min_freq_softlimit) 1854 new_freq = rps->min_freq_softlimit; 1855 adj = 0; 1856 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1857 if (adj < 0) 1858 adj *= 2; 1859 else /* CHV needs even encode values */ 1860 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; 1861 1862 if (new_freq <= rps->min_freq_softlimit) 1863 adj = 0; 1864 } else { /* unknown event */ 1865 adj = 0; 1866 } 1867 1868 /* 1869 * sysfs frequency limits may have snuck in while 1870 * servicing the interrupt 1871 */ 1872 new_freq += adj; 1873 new_freq = clamp_t(int, new_freq, min, max); 1874 1875 if (intel_rps_set(rps, new_freq)) { 1876 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); 1877 adj = 0; 1878 } 1879 rps->last_adj = adj; 1880 1881 mutex_unlock(&rps->lock); 1882 1883 out: 1884 spin_lock_irq(gt->irq_lock); 1885 gen6_gt_pm_unmask_irq(gt, rps->pm_events); 1886 spin_unlock_irq(gt->irq_lock); 1887 } 1888 1889 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1890 { 1891 struct intel_gt *gt = rps_to_gt(rps); 1892 const u32 events = rps->pm_events & pm_iir; 1893 1894 lockdep_assert_held(gt->irq_lock); 1895 1896 if (unlikely(!events)) 1897 return; 1898 1899 GT_TRACE(gt, "irq events:%x\n", events); 1900 1901 gen6_gt_pm_mask_irq(gt, events); 1902 1903 rps->pm_iir |= events; 1904 schedule_work(&rps->work); 1905 } 1906 1907 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1908 { 1909 struct intel_gt *gt = rps_to_gt(rps); 1910 u32 events; 1911 1912 events = pm_iir & rps->pm_events; 1913 if (events) { 1914 spin_lock(gt->irq_lock); 1915 1916 GT_TRACE(gt, "irq events:%x\n", events); 1917 1918 gen6_gt_pm_mask_irq(gt, events); 1919 rps->pm_iir |= events; 1920 1921 schedule_work(&rps->work); 1922 spin_unlock(gt->irq_lock); 1923 } 1924 1925 if (GRAPHICS_VER(gt->i915) >= 8) 1926 return; 1927 1928 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1929 intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10); 1930 1931 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1932 drm_dbg(&rps_to_i915(rps)->drm, 1933 "Command parser error, pm_iir 0x%08x\n", pm_iir); 1934 } 1935 1936 void gen5_rps_irq_handler(struct intel_rps *rps) 1937 { 1938 struct intel_uncore *uncore = rps_to_uncore(rps); 1939 u32 busy_up, busy_down, max_avg, min_avg; 1940 u8 new_freq; 1941 1942 spin_lock(&mchdev_lock); 1943 1944 intel_uncore_write16(uncore, 1945 MEMINTRSTS, 1946 intel_uncore_read(uncore, MEMINTRSTS)); 1947 1948 intel_uncore_write16(uncore, 
MEMINTRSTS, MEMINT_EVAL_CHG); 1949 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); 1950 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); 1951 max_avg = intel_uncore_read(uncore, RCBMAXAVG); 1952 min_avg = intel_uncore_read(uncore, RCBMINAVG); 1953 1954 /* Handle RCS change request from hw */ 1955 new_freq = rps->cur_freq; 1956 if (busy_up > max_avg) 1957 new_freq++; 1958 else if (busy_down < min_avg) 1959 new_freq--; 1960 new_freq = clamp(new_freq, 1961 rps->min_freq_softlimit, 1962 rps->max_freq_softlimit); 1963 1964 if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq)) 1965 rps->cur_freq = new_freq; 1966 1967 spin_unlock(&mchdev_lock); 1968 } 1969 1970 void intel_rps_init_early(struct intel_rps *rps) 1971 { 1972 mutex_init(&rps->lock); 1973 mutex_init(&rps->power.mutex); 1974 1975 INIT_WORK(&rps->work, rps_work); 1976 timer_setup(&rps->timer, rps_timer, 0); 1977 1978 atomic_set(&rps->num_waiters, 0); 1979 } 1980 1981 void intel_rps_init(struct intel_rps *rps) 1982 { 1983 struct drm_i915_private *i915 = rps_to_i915(rps); 1984 1985 if (rps_uses_slpc(rps)) 1986 return; 1987 1988 if (IS_CHERRYVIEW(i915)) 1989 chv_rps_init(rps); 1990 else if (IS_VALLEYVIEW(i915)) 1991 vlv_rps_init(rps); 1992 else if (GRAPHICS_VER(i915) >= 6) 1993 gen6_rps_init(rps); 1994 else if (IS_IRONLAKE_M(i915)) 1995 gen5_rps_init(rps); 1996 1997 /* Derive initial user preferences/limits from the hardware limits */ 1998 rps->max_freq_softlimit = rps->max_freq; 1999 rps_to_gt(rps)->defaults.max_freq = rps->max_freq_softlimit; 2000 rps->min_freq_softlimit = rps->min_freq; 2001 rps_to_gt(rps)->defaults.min_freq = rps->min_freq_softlimit; 2002 2003 /* After setting max-softlimit, find the overclock max freq */ 2004 if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { 2005 u32 params = 0; 2006 2007 snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, ¶ms, NULL); 2008 if (params & BIT(31)) { /* OC supported */ 2009 drm_dbg(&i915->drm, 2010 "Overclocking supported, max: %dMHz, overclock: %dMHz\n", 2011 (rps->max_freq & 0xff) * 50, 2012 (params & 0xff) * 50); 2013 rps->max_freq = params & 0xff; 2014 } 2015 } 2016 2017 /* Set default thresholds in % */ 2018 rps->power.up_threshold = 95; 2019 rps_to_gt(rps)->defaults.rps_up_threshold = rps->power.up_threshold; 2020 rps->power.down_threshold = 85; 2021 rps_to_gt(rps)->defaults.rps_down_threshold = rps->power.down_threshold; 2022 2023 /* Finally allow us to boost to max by default */ 2024 rps->boost_freq = rps->max_freq; 2025 rps->idle_freq = rps->min_freq; 2026 2027 /* Start in the middle, from here we will autotune based on workload */ 2028 rps->cur_freq = rps->efficient_freq; 2029 2030 rps->pm_intrmsk_mbz = 0; 2031 2032 /* 2033 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 2034 * if GEN6_PM_UP_EI_EXPIRED is masked. 2035 * 2036 * TODO: verify if this can be reproduced on VLV,CHV. 
2037 */ 2038 if (GRAPHICS_VER(i915) <= 7) 2039 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 2040 2041 if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11) 2042 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 2043 2044 /* GuC needs ARAT expired interrupt unmasked */ 2045 if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc)) 2046 rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; 2047 } 2048 2049 void intel_rps_sanitize(struct intel_rps *rps) 2050 { 2051 if (rps_uses_slpc(rps)) 2052 return; 2053 2054 if (GRAPHICS_VER(rps_to_i915(rps)) >= 6) 2055 rps_disable_interrupts(rps); 2056 } 2057 2058 u32 intel_rps_read_rpstat(struct intel_rps *rps) 2059 { 2060 struct drm_i915_private *i915 = rps_to_i915(rps); 2061 i915_reg_t rpstat; 2062 2063 rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1; 2064 2065 return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat); 2066 } 2067 2068 static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) 2069 { 2070 struct drm_i915_private *i915 = rps_to_i915(rps); 2071 u32 cagf; 2072 2073 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) 2074 cagf = REG_FIELD_GET(MTL_CAGF_MASK, rpstat); 2075 else if (GRAPHICS_VER(i915) >= 12) 2076 cagf = REG_FIELD_GET(GEN12_CAGF_MASK, rpstat); 2077 else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 2078 cagf = REG_FIELD_GET(RPE_MASK, rpstat); 2079 else if (GRAPHICS_VER(i915) >= 9) 2080 cagf = REG_FIELD_GET(GEN9_CAGF_MASK, rpstat); 2081 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 2082 cagf = REG_FIELD_GET(HSW_CAGF_MASK, rpstat); 2083 else if (GRAPHICS_VER(i915) >= 6) 2084 cagf = REG_FIELD_GET(GEN6_CAGF_MASK, rpstat); 2085 else 2086 cagf = gen5_invert_freq(rps, REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rpstat)); 2087 2088 return cagf; 2089 } 2090 2091 static u32 __read_cagf(struct intel_rps *rps, bool take_fw) 2092 { 2093 struct drm_i915_private *i915 = rps_to_i915(rps); 2094 struct intel_uncore *uncore = rps_to_uncore(rps); 2095 i915_reg_t r = INVALID_MMIO_REG; 2096 u32 freq; 2097 2098 /* 2099 * For Gen12+ reading freq from HW does not need a forcewake and 2100 * registers will return 0 freq when GT is in RC6 2101 */ 2102 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) { 2103 r = MTL_MIRROR_TARGET_WP1; 2104 } else if (GRAPHICS_VER(i915) >= 12) { 2105 r = GEN12_RPSTAT1; 2106 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 2107 vlv_punit_get(i915); 2108 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 2109 vlv_punit_put(i915); 2110 } else if (GRAPHICS_VER(i915) >= 6) { 2111 r = GEN6_RPSTAT1; 2112 } else { 2113 r = MEMSTAT_ILK; 2114 } 2115 2116 if (i915_mmio_reg_valid(r)) 2117 freq = take_fw ? 
intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r); 2118 2119 return intel_rps_get_cagf(rps, freq); 2120 } 2121 2122 static u32 read_cagf(struct intel_rps *rps) 2123 { 2124 return __read_cagf(rps, true); 2125 } 2126 2127 u32 intel_rps_read_actual_frequency(struct intel_rps *rps) 2128 { 2129 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 2130 intel_wakeref_t wakeref; 2131 u32 freq = 0; 2132 2133 with_intel_runtime_pm_if_in_use(rpm, wakeref) 2134 freq = intel_gpu_freq(rps, read_cagf(rps)); 2135 2136 return freq; 2137 } 2138 2139 u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps) 2140 { 2141 return intel_gpu_freq(rps, __read_cagf(rps, false)); 2142 } 2143 2144 static u32 intel_rps_read_punit_req(struct intel_rps *rps) 2145 { 2146 struct intel_uncore *uncore = rps_to_uncore(rps); 2147 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 2148 intel_wakeref_t wakeref; 2149 u32 freq = 0; 2150 2151 with_intel_runtime_pm_if_in_use(rpm, wakeref) 2152 freq = intel_uncore_read(uncore, GEN6_RPNSWREQ); 2153 2154 return freq; 2155 } 2156 2157 static u32 intel_rps_get_req(u32 pureq) 2158 { 2159 u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT; 2160 2161 return req; 2162 } 2163 2164 u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps) 2165 { 2166 u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps)); 2167 2168 return intel_gpu_freq(rps, freq); 2169 } 2170 2171 u32 intel_rps_get_requested_frequency(struct intel_rps *rps) 2172 { 2173 if (rps_uses_slpc(rps)) 2174 return intel_rps_read_punit_req_frequency(rps); 2175 else 2176 return intel_gpu_freq(rps, rps->cur_freq); 2177 } 2178 2179 u32 intel_rps_get_max_frequency(struct intel_rps *rps) 2180 { 2181 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2182 2183 if (rps_uses_slpc(rps)) 2184 return slpc->max_freq_softlimit; 2185 else 2186 return intel_gpu_freq(rps, rps->max_freq_softlimit); 2187 } 2188 2189 /** 2190 * intel_rps_get_max_raw_freq - returns the max frequency in some raw format. 2191 * @rps: the intel_rps structure 2192 * 2193 * Returns the max frequency in a raw format. In newer platforms raw is in 2194 * units of 50 MHz. 
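 *
 * As a rough worked example (illustrative values only): on Gen9+ the
 * internal unit is GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER (50 / 3,
 * i.e. ~16.67 MHz) per step, so an rps->max_freq of 66 would be reported
 * here as 66 / GEN9_FREQ_SCALER = 22 raw 50 MHz units, roughly 1100 MHz.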
2195 */ 2196 u32 intel_rps_get_max_raw_freq(struct intel_rps *rps) 2197 { 2198 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2199 u32 freq; 2200 2201 if (rps_uses_slpc(rps)) { 2202 return DIV_ROUND_CLOSEST(slpc->rp0_freq, 2203 GT_FREQUENCY_MULTIPLIER); 2204 } else { 2205 freq = rps->max_freq; 2206 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { 2207 /* Convert GT frequency to 50 MHz units */ 2208 freq /= GEN9_FREQ_SCALER; 2209 } 2210 return freq; 2211 } 2212 } 2213 2214 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps) 2215 { 2216 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2217 2218 if (rps_uses_slpc(rps)) 2219 return slpc->rp0_freq; 2220 else 2221 return intel_gpu_freq(rps, rps->rp0_freq); 2222 } 2223 2224 u32 intel_rps_get_rp1_frequency(struct intel_rps *rps) 2225 { 2226 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2227 2228 if (rps_uses_slpc(rps)) 2229 return slpc->rp1_freq; 2230 else 2231 return intel_gpu_freq(rps, rps->rp1_freq); 2232 } 2233 2234 u32 intel_rps_get_rpn_frequency(struct intel_rps *rps) 2235 { 2236 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2237 2238 if (rps_uses_slpc(rps)) 2239 return slpc->min_freq; 2240 else 2241 return intel_gpu_freq(rps, rps->min_freq); 2242 } 2243 2244 static void rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p) 2245 { 2246 struct intel_gt *gt = rps_to_gt(rps); 2247 struct drm_i915_private *i915 = gt->i915; 2248 struct intel_uncore *uncore = gt->uncore; 2249 struct intel_rps_freq_caps caps; 2250 u32 rp_state_limits; 2251 u32 gt_perf_status; 2252 u32 rpmodectl, rpinclimit, rpdeclimit; 2253 u32 rpstat, cagf, reqf; 2254 u32 rpcurupei, rpcurup, rpprevup; 2255 u32 rpcurdownei, rpcurdown, rpprevdown; 2256 u32 rpupei, rpupt, rpdownei, rpdownt; 2257 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 2258 2259 rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS); 2260 gen6_rps_get_freq_caps(rps, &caps); 2261 if (IS_GEN9_LP(i915)) 2262 gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS); 2263 else 2264 gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS); 2265 2266 /* RPSTAT1 is in the GT power well */ 2267 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 2268 2269 reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ); 2270 if (GRAPHICS_VER(i915) >= 9) { 2271 reqf >>= 23; 2272 } else { 2273 reqf &= ~GEN6_TURBO_DISABLE; 2274 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 2275 reqf >>= 24; 2276 else 2277 reqf >>= 25; 2278 } 2279 reqf = intel_gpu_freq(rps, reqf); 2280 2281 rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); 2282 rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); 2283 rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); 2284 2285 rpstat = intel_rps_read_rpstat(rps); 2286 rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 2287 rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 2288 rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 2289 rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 2290 rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 2291 rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 2292 2293 rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI); 2294 rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); 2295 2296 rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); 2297 rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); 2298 2299 cagf = intel_rps_read_actual_frequency(rps); 
2300 2301 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 2302 2303 if (GRAPHICS_VER(i915) >= 11) { 2304 pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); 2305 pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); 2306 /* 2307 * The equivalent to the PM ISR & IIR cannot be read 2308 * without affecting the current state of the system 2309 */ 2310 pm_isr = 0; 2311 pm_iir = 0; 2312 } else if (GRAPHICS_VER(i915) >= 8) { 2313 pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2)); 2314 pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2)); 2315 pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2)); 2316 pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2)); 2317 } else { 2318 pm_ier = intel_uncore_read(uncore, GEN6_PMIER); 2319 pm_imr = intel_uncore_read(uncore, GEN6_PMIMR); 2320 pm_isr = intel_uncore_read(uncore, GEN6_PMISR); 2321 pm_iir = intel_uncore_read(uncore, GEN6_PMIIR); 2322 } 2323 pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); 2324 2325 drm_printf(p, "Video Turbo Mode: %s\n", 2326 str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO)); 2327 drm_printf(p, "HW control enabled: %s\n", 2328 str_yes_no(rpmodectl & GEN6_RP_ENABLE)); 2329 drm_printf(p, "SW control enabled: %s\n", 2330 str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); 2331 2332 drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", 2333 pm_ier, pm_imr, pm_mask); 2334 if (GRAPHICS_VER(i915) <= 10) 2335 drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n", 2336 pm_isr, pm_iir); 2337 drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n", 2338 rps->pm_intrmsk_mbz); 2339 drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 2340 drm_printf(p, "Render p-state ratio: %d\n", 2341 (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 2342 drm_printf(p, "Render p-state VID: %d\n", 2343 gt_perf_status & 0xff); 2344 drm_printf(p, "Render p-state limit: %d\n", 2345 rp_state_limits & 0xff); 2346 drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat); 2347 drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl); 2348 drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit); 2349 drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 2350 drm_printf(p, "RPNSWREQ: %dMHz\n", reqf); 2351 drm_printf(p, "CAGF: %dMHz\n", cagf); 2352 drm_printf(p, "RP CUR UP EI: %d (%lldns)\n", 2353 rpcurupei, 2354 intel_gt_pm_interval_to_ns(gt, rpcurupei)); 2355 drm_printf(p, "RP CUR UP: %d (%lldns)\n", 2356 rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup)); 2357 drm_printf(p, "RP PREV UP: %d (%lldns)\n", 2358 rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup)); 2359 drm_printf(p, "Up threshold: %d%%\n", 2360 rps->power.up_threshold); 2361 drm_printf(p, "RP UP EI: %d (%lldns)\n", 2362 rpupei, intel_gt_pm_interval_to_ns(gt, rpupei)); 2363 drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n", 2364 rpupt, intel_gt_pm_interval_to_ns(gt, rpupt)); 2365 2366 drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n", 2367 rpcurdownei, 2368 intel_gt_pm_interval_to_ns(gt, rpcurdownei)); 2369 drm_printf(p, "RP CUR DOWN: %d (%lldns)\n", 2370 rpcurdown, 2371 intel_gt_pm_interval_to_ns(gt, rpcurdown)); 2372 drm_printf(p, "RP PREV DOWN: %d (%lldns)\n", 2373 rpprevdown, 2374 intel_gt_pm_interval_to_ns(gt, rpprevdown)); 2375 drm_printf(p, "Down threshold: %d%%\n", 2376 rps->power.down_threshold); 2377 drm_printf(p, "RP DOWN EI: %d (%lldns)\n", 2378 rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei)); 2379 drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n", 2380 rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); 2381 2382 drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", 2383 
intel_gpu_freq(rps, caps.min_freq)); 2384 drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", 2385 intel_gpu_freq(rps, caps.rp1_freq)); 2386 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", 2387 intel_gpu_freq(rps, caps.rp0_freq)); 2388 drm_printf(p, "Max overclocked frequency: %dMHz\n", 2389 intel_gpu_freq(rps, rps->max_freq)); 2390 2391 drm_printf(p, "Current freq: %d MHz\n", 2392 intel_gpu_freq(rps, rps->cur_freq)); 2393 drm_printf(p, "Actual freq: %d MHz\n", cagf); 2394 drm_printf(p, "Idle freq: %d MHz\n", 2395 intel_gpu_freq(rps, rps->idle_freq)); 2396 drm_printf(p, "Min freq: %d MHz\n", 2397 intel_gpu_freq(rps, rps->min_freq)); 2398 drm_printf(p, "Boost freq: %d MHz\n", 2399 intel_gpu_freq(rps, rps->boost_freq)); 2400 drm_printf(p, "Max freq: %d MHz\n", 2401 intel_gpu_freq(rps, rps->max_freq)); 2402 drm_printf(p, 2403 "efficient (RPe) frequency: %d MHz\n", 2404 intel_gpu_freq(rps, rps->efficient_freq)); 2405 } 2406 2407 static void slpc_frequency_dump(struct intel_rps *rps, struct drm_printer *p) 2408 { 2409 struct intel_gt *gt = rps_to_gt(rps); 2410 struct intel_uncore *uncore = gt->uncore; 2411 struct intel_rps_freq_caps caps; 2412 u32 pm_mask; 2413 2414 gen6_rps_get_freq_caps(rps, &caps); 2415 pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); 2416 2417 drm_printf(p, "PM MASK=0x%08x\n", pm_mask); 2418 drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n", 2419 rps->pm_intrmsk_mbz); 2420 drm_printf(p, "RPSTAT1: 0x%08x\n", intel_rps_read_rpstat(rps)); 2421 drm_printf(p, "RPNSWREQ: %dMHz\n", intel_rps_get_requested_frequency(rps)); 2422 drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", 2423 intel_gpu_freq(rps, caps.min_freq)); 2424 drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", 2425 intel_gpu_freq(rps, caps.rp1_freq)); 2426 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", 2427 intel_gpu_freq(rps, caps.rp0_freq)); 2428 drm_printf(p, "Current freq: %d MHz\n", 2429 intel_rps_get_requested_frequency(rps)); 2430 drm_printf(p, "Actual freq: %d MHz\n", 2431 intel_rps_read_actual_frequency(rps)); 2432 drm_printf(p, "Min freq: %d MHz\n", 2433 intel_rps_get_min_frequency(rps)); 2434 drm_printf(p, "Boost freq: %d MHz\n", 2435 intel_rps_get_boost_frequency(rps)); 2436 drm_printf(p, "Max freq: %d MHz\n", 2437 intel_rps_get_max_frequency(rps)); 2438 drm_printf(p, 2439 "efficient (RPe) frequency: %d MHz\n", 2440 intel_gpu_freq(rps, caps.rp1_freq)); 2441 } 2442 2443 void gen6_rps_frequency_dump(struct intel_rps *rps, struct drm_printer *p) 2444 { 2445 if (rps_uses_slpc(rps)) 2446 return slpc_frequency_dump(rps, p); 2447 else 2448 return rps_frequency_dump(rps, p); 2449 } 2450 2451 static int set_max_freq(struct intel_rps *rps, u32 val) 2452 { 2453 struct drm_i915_private *i915 = rps_to_i915(rps); 2454 int ret = 0; 2455 2456 mutex_lock(&rps->lock); 2457 2458 val = intel_freq_opcode(rps, val); 2459 if (val < rps->min_freq || 2460 val > rps->max_freq || 2461 val < rps->min_freq_softlimit) { 2462 ret = -EINVAL; 2463 goto unlock; 2464 } 2465 2466 if (val > rps->rp0_freq) 2467 drm_dbg(&i915->drm, "User requested overclocking to %d\n", 2468 intel_gpu_freq(rps, val)); 2469 2470 rps->max_freq_softlimit = val; 2471 2472 val = clamp_t(int, rps->cur_freq, 2473 rps->min_freq_softlimit, 2474 rps->max_freq_softlimit); 2475 2476 /* 2477 * We still need *_set_rps to process the new max_delay and 2478 * update the interrupt limits and PMINTRMSK even though 2479 * frequency request may be unchanged. 
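 *
 * (Hence intel_rps_set() is called below with cur_freq clamped to the new
 * softlimits, so the interrupt masking is recomputed even when the
 * resulting frequency request is identical to the current one.)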
2480 */ 2481 intel_rps_set(rps, val); 2482 2483 unlock: 2484 mutex_unlock(&rps->lock); 2485 2486 return ret; 2487 } 2488 2489 int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val) 2490 { 2491 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2492 2493 if (rps_uses_slpc(rps)) 2494 return intel_guc_slpc_set_max_freq(slpc, val); 2495 else 2496 return set_max_freq(rps, val); 2497 } 2498 2499 u32 intel_rps_get_min_frequency(struct intel_rps *rps) 2500 { 2501 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2502 2503 if (rps_uses_slpc(rps)) 2504 return slpc->min_freq_softlimit; 2505 else 2506 return intel_gpu_freq(rps, rps->min_freq_softlimit); 2507 } 2508 2509 /** 2510 * intel_rps_get_min_raw_freq - returns the min frequency in some raw format. 2511 * @rps: the intel_rps structure 2512 * 2513 * Returns the min frequency in a raw format. In newer platforms raw is in 2514 * units of 50 MHz. 2515 */ 2516 u32 intel_rps_get_min_raw_freq(struct intel_rps *rps) 2517 { 2518 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2519 u32 freq; 2520 2521 if (rps_uses_slpc(rps)) { 2522 return DIV_ROUND_CLOSEST(slpc->min_freq, 2523 GT_FREQUENCY_MULTIPLIER); 2524 } else { 2525 freq = rps->min_freq; 2526 if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) { 2527 /* Convert GT frequency to 50 MHz units */ 2528 freq /= GEN9_FREQ_SCALER; 2529 } 2530 return freq; 2531 } 2532 } 2533 2534 static int set_min_freq(struct intel_rps *rps, u32 val) 2535 { 2536 int ret = 0; 2537 2538 mutex_lock(&rps->lock); 2539 2540 val = intel_freq_opcode(rps, val); 2541 if (val < rps->min_freq || 2542 val > rps->max_freq || 2543 val > rps->max_freq_softlimit) { 2544 ret = -EINVAL; 2545 goto unlock; 2546 } 2547 2548 rps->min_freq_softlimit = val; 2549 2550 val = clamp_t(int, rps->cur_freq, 2551 rps->min_freq_softlimit, 2552 rps->max_freq_softlimit); 2553 2554 /* 2555 * We still need *_set_rps to process the new min_delay and 2556 * update the interrupt limits and PMINTRMSK even though 2557 * frequency request may be unchanged. 2558 */ 2559 intel_rps_set(rps, val); 2560 2561 unlock: 2562 mutex_unlock(&rps->lock); 2563 2564 return ret; 2565 } 2566 2567 int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val) 2568 { 2569 struct intel_guc_slpc *slpc = rps_to_slpc(rps); 2570 2571 if (rps_uses_slpc(rps)) 2572 return intel_guc_slpc_set_min_freq(slpc, val); 2573 else 2574 return set_min_freq(rps, val); 2575 } 2576 2577 u8 intel_rps_get_up_threshold(struct intel_rps *rps) 2578 { 2579 return rps->power.up_threshold; 2580 } 2581 2582 static int rps_set_threshold(struct intel_rps *rps, u8 *threshold, u8 val) 2583 { 2584 int ret; 2585 2586 if (val > 100) 2587 return -EINVAL; 2588 2589 ret = mutex_lock_interruptible(&rps->lock); 2590 if (ret) 2591 return ret; 2592 2593 if (*threshold == val) 2594 goto out_unlock; 2595 2596 *threshold = val; 2597 2598 /* Force reset. 
*/ 2599 rps->last_freq = -1; 2600 mutex_lock(&rps->power.mutex); 2601 rps->power.mode = -1; 2602 mutex_unlock(&rps->power.mutex); 2603 2604 intel_rps_set(rps, clamp(rps->cur_freq, 2605 rps->min_freq_softlimit, 2606 rps->max_freq_softlimit)); 2607 2608 out_unlock: 2609 mutex_unlock(&rps->lock); 2610 2611 return ret; 2612 } 2613 2614 int intel_rps_set_up_threshold(struct intel_rps *rps, u8 threshold) 2615 { 2616 return rps_set_threshold(rps, &rps->power.up_threshold, threshold); 2617 } 2618 2619 u8 intel_rps_get_down_threshold(struct intel_rps *rps) 2620 { 2621 return rps->power.down_threshold; 2622 } 2623 2624 int intel_rps_set_down_threshold(struct intel_rps *rps, u8 threshold) 2625 { 2626 return rps_set_threshold(rps, &rps->power.down_threshold, threshold); 2627 } 2628 2629 static void intel_rps_set_manual(struct intel_rps *rps, bool enable) 2630 { 2631 struct intel_uncore *uncore = rps_to_uncore(rps); 2632 u32 state = enable ? GEN9_RPSWCTL_ENABLE : GEN9_RPSWCTL_DISABLE; 2633 2634 /* Allow punit to process software requests */ 2635 intel_uncore_write(uncore, GEN6_RP_CONTROL, state); 2636 } 2637 2638 void intel_rps_raise_unslice(struct intel_rps *rps) 2639 { 2640 struct intel_uncore *uncore = rps_to_uncore(rps); 2641 2642 mutex_lock(&rps->lock); 2643 2644 if (rps_uses_slpc(rps)) { 2645 /* RP limits have not been initialized yet for SLPC path */ 2646 struct intel_rps_freq_caps caps; 2647 2648 gen6_rps_get_freq_caps(rps, &caps); 2649 2650 intel_rps_set_manual(rps, true); 2651 intel_uncore_write(uncore, GEN6_RPNSWREQ, 2652 ((caps.rp0_freq << 2653 GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | 2654 GEN9_IGNORE_SLICE_RATIO)); 2655 intel_rps_set_manual(rps, false); 2656 } else { 2657 intel_rps_set(rps, rps->rp0_freq); 2658 } 2659 2660 mutex_unlock(&rps->lock); 2661 } 2662 2663 void intel_rps_lower_unslice(struct intel_rps *rps) 2664 { 2665 struct intel_uncore *uncore = rps_to_uncore(rps); 2666 2667 mutex_lock(&rps->lock); 2668 2669 if (rps_uses_slpc(rps)) { 2670 /* RP limits have not been initialized yet for SLPC path */ 2671 struct intel_rps_freq_caps caps; 2672 2673 gen6_rps_get_freq_caps(rps, &caps); 2674 2675 intel_rps_set_manual(rps, true); 2676 intel_uncore_write(uncore, GEN6_RPNSWREQ, 2677 ((caps.min_freq << 2678 GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) | 2679 GEN9_IGNORE_SLICE_RATIO)); 2680 intel_rps_set_manual(rps, false); 2681 } else { 2682 intel_rps_set(rps, rps->min_freq); 2683 } 2684 2685 mutex_unlock(&rps->lock); 2686 } 2687 2688 static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32) 2689 { 2690 struct intel_gt *gt = rps_to_gt(rps); 2691 intel_wakeref_t wakeref; 2692 u32 val; 2693 2694 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 2695 val = intel_uncore_read(gt->uncore, reg32); 2696 2697 return val; 2698 } 2699 2700 bool rps_read_mask_mmio(struct intel_rps *rps, 2701 i915_reg_t reg32, u32 mask) 2702 { 2703 return rps_read_mmio(rps, reg32) & mask; 2704 } 2705 2706 /* External interface for intel_ips.ko */ 2707 2708 static struct drm_i915_private __rcu *ips_mchdev; 2709 2710 /* 2711 * Tells the intel_ips driver that the i915 driver is now loaded, if 2712 * IPS got loaded first. 2713 * 2714 * This awkward dance is so that neither module has to depend on the 2715 * other in order for IPS to do the appropriate communication of 2716 * GPU turbo limits to i915. 
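 *
 * Concretely: once i915 has registered, it pings intel_ips below via
 * symbol_get(ips_link_to_i915_driver), and intel_ips in turn calls back
 * through the i915_read_mch_val()/i915_gpu_*() hooks exported towards the
 * end of this file.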
2717 */ 2718 static void 2719 ips_ping_for_i915_load(void) 2720 { 2721 void (*link)(void); 2722 2723 link = symbol_get(ips_link_to_i915_driver); 2724 if (link) { 2725 link(); 2726 symbol_put(ips_link_to_i915_driver); 2727 } 2728 } 2729 2730 void intel_rps_driver_register(struct intel_rps *rps) 2731 { 2732 struct intel_gt *gt = rps_to_gt(rps); 2733 2734 /* 2735 * We only register the i915 ips part with intel-ips once everything is 2736 * set up, to avoid intel-ips sneaking in and reading bogus values. 2737 */ 2738 if (GRAPHICS_VER(gt->i915) == 5) { 2739 GEM_BUG_ON(ips_mchdev); 2740 rcu_assign_pointer(ips_mchdev, gt->i915); 2741 ips_ping_for_i915_load(); 2742 } 2743 } 2744 2745 void intel_rps_driver_unregister(struct intel_rps *rps) 2746 { 2747 if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps)) 2748 rcu_assign_pointer(ips_mchdev, NULL); 2749 } 2750 2751 static struct drm_i915_private *mchdev_get(void) 2752 { 2753 struct drm_i915_private *i915; 2754 2755 rcu_read_lock(); 2756 i915 = rcu_dereference(ips_mchdev); 2757 if (i915 && !kref_get_unless_zero(&i915->drm.ref)) 2758 i915 = NULL; 2759 rcu_read_unlock(); 2760 2761 return i915; 2762 } 2763 2764 /** 2765 * i915_read_mch_val - return value for IPS use 2766 * 2767 * Calculate and return a value for the IPS driver to use when deciding whether 2768 * we have thermal and power headroom to increase CPU or GPU power budget. 2769 */ 2770 unsigned long i915_read_mch_val(void) 2771 { 2772 struct drm_i915_private *i915; 2773 unsigned long chipset_val = 0; 2774 unsigned long graphics_val = 0; 2775 intel_wakeref_t wakeref; 2776 2777 i915 = mchdev_get(); 2778 if (!i915) 2779 return 0; 2780 2781 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 2782 struct intel_ips *ips = &to_gt(i915)->rps.ips; 2783 2784 spin_lock_irq(&mchdev_lock); 2785 chipset_val = __ips_chipset_val(ips); 2786 graphics_val = __ips_gfx_val(ips); 2787 spin_unlock_irq(&mchdev_lock); 2788 } 2789 2790 drm_dev_put(&i915->drm); 2791 return chipset_val + graphics_val; 2792 } 2793 EXPORT_SYMBOL_GPL(i915_read_mch_val); 2794 2795 /** 2796 * i915_gpu_raise - raise GPU frequency limit 2797 * 2798 * Raise the limit; IPS indicates we have thermal headroom. 2799 */ 2800 bool i915_gpu_raise(void) 2801 { 2802 struct drm_i915_private *i915; 2803 struct intel_rps *rps; 2804 2805 i915 = mchdev_get(); 2806 if (!i915) 2807 return false; 2808 2809 rps = &to_gt(i915)->rps; 2810 2811 spin_lock_irq(&mchdev_lock); 2812 if (rps->max_freq_softlimit < rps->max_freq) 2813 rps->max_freq_softlimit++; 2814 spin_unlock_irq(&mchdev_lock); 2815 2816 drm_dev_put(&i915->drm); 2817 return true; 2818 } 2819 EXPORT_SYMBOL_GPL(i915_gpu_raise); 2820 2821 /** 2822 * i915_gpu_lower - lower GPU frequency limit 2823 * 2824 * IPS indicates we're close to a thermal limit, so throttle back the GPU 2825 * frequency maximum. 2826 */ 2827 bool i915_gpu_lower(void) 2828 { 2829 struct drm_i915_private *i915; 2830 struct intel_rps *rps; 2831 2832 i915 = mchdev_get(); 2833 if (!i915) 2834 return false; 2835 2836 rps = &to_gt(i915)->rps; 2837 2838 spin_lock_irq(&mchdev_lock); 2839 if (rps->max_freq_softlimit > rps->min_freq) 2840 rps->max_freq_softlimit--; 2841 spin_unlock_irq(&mchdev_lock); 2842 2843 drm_dev_put(&i915->drm); 2844 return true; 2845 } 2846 EXPORT_SYMBOL_GPL(i915_gpu_lower); 2847 2848 /** 2849 * i915_gpu_busy - indicate GPU business to IPS 2850 * 2851 * Tell the IPS driver whether or not the GPU is busy. 
2852 */ 2853 bool i915_gpu_busy(void) 2854 { 2855 struct drm_i915_private *i915; 2856 bool ret; 2857 2858 i915 = mchdev_get(); 2859 if (!i915) 2860 return false; 2861 2862 ret = to_gt(i915)->awake; 2863 2864 drm_dev_put(&i915->drm); 2865 return ret; 2866 } 2867 EXPORT_SYMBOL_GPL(i915_gpu_busy); 2868 2869 /** 2870 * i915_gpu_turbo_disable - disable graphics turbo 2871 * 2872 * Disable graphics turbo by resetting the max frequency and setting the 2873 * current frequency to the default. 2874 */ 2875 bool i915_gpu_turbo_disable(void) 2876 { 2877 struct drm_i915_private *i915; 2878 struct intel_rps *rps; 2879 bool ret; 2880 2881 i915 = mchdev_get(); 2882 if (!i915) 2883 return false; 2884 2885 rps = &to_gt(i915)->rps; 2886 2887 spin_lock_irq(&mchdev_lock); 2888 rps->max_freq_softlimit = rps->min_freq; 2889 ret = !__gen5_rps_set(&to_gt(i915)->rps, rps->min_freq); 2890 spin_unlock_irq(&mchdev_lock); 2891 2892 drm_dev_put(&i915->drm); 2893 return ret; 2894 } 2895 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 2896 2897 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2898 #include "selftest_rps.c" 2899 #include "selftest_slpc.c" 2900 #endif 2901