1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2019 Intel Corporation 5 */ 6 7 #include <drm/i915_drm.h> 8 9 #include "i915_drv.h" 10 #include "intel_gt.h" 11 #include "intel_gt_clock_utils.h" 12 #include "intel_gt_irq.h" 13 #include "intel_gt_pm_irq.h" 14 #include "intel_rps.h" 15 #include "intel_sideband.h" 16 #include "../../../platform/x86/intel_ips.h" 17 18 #define BUSY_MAX_EI 20u /* ms */ 19 20 /* 21 * Lock protecting IPS related data structures 22 */ 23 static DEFINE_SPINLOCK(mchdev_lock); 24 25 static struct intel_gt *rps_to_gt(struct intel_rps *rps) 26 { 27 return container_of(rps, struct intel_gt, rps); 28 } 29 30 static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) 31 { 32 return rps_to_gt(rps)->i915; 33 } 34 35 static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) 36 { 37 return rps_to_gt(rps)->uncore; 38 } 39 40 static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) 41 { 42 return mask & ~rps->pm_intrmsk_mbz; 43 } 44 45 static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 46 { 47 intel_uncore_write_fw(uncore, reg, val); 48 } 49 50 static void rps_timer(struct timer_list *t) 51 { 52 struct intel_rps *rps = from_timer(rps, t, timer); 53 struct intel_engine_cs *engine; 54 ktime_t dt, last, timestamp; 55 enum intel_engine_id id; 56 s64 max_busy[3] = {}; 57 58 timestamp = 0; 59 for_each_engine(engine, rps_to_gt(rps), id) { 60 s64 busy; 61 int i; 62 63 dt = intel_engine_get_busy_time(engine, ×tamp); 64 last = engine->stats.rps; 65 engine->stats.rps = dt; 66 67 busy = ktime_to_ns(ktime_sub(dt, last)); 68 for (i = 0; i < ARRAY_SIZE(max_busy); i++) { 69 if (busy > max_busy[i]) 70 swap(busy, max_busy[i]); 71 } 72 } 73 last = rps->pm_timestamp; 74 rps->pm_timestamp = timestamp; 75 76 if (intel_rps_is_active(rps)) { 77 s64 busy; 78 int i; 79 80 dt = ktime_sub(timestamp, last); 81 82 /* 83 * Our goal is to evaluate each engine independently, so we run 84 * at the lowest clocks required to sustain the heaviest 85 * workload. However, a task may be split into sequential 86 * dependent operations across a set of engines, such that 87 * the independent contributions do not account for high load, 88 * but overall the task is GPU bound. For example, consider 89 * video decode on vcs followed by colour post-processing 90 * on vecs, followed by general post-processing on rcs. 91 * Since multi-engines being active does imply a single 92 * continuous workload across all engines, we hedge our 93 * bets by only contributing a factor of the distributed 94 * load into our busyness calculation. 95 */ 96 busy = max_busy[0]; 97 for (i = 1; i < ARRAY_SIZE(max_busy); i++) { 98 if (!max_busy[i]) 99 break; 100 101 busy += div_u64(max_busy[i], 1 << i); 102 } 103 GT_TRACE(rps_to_gt(rps), 104 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", 105 busy, (int)div64_u64(100 * busy, dt), 106 max_busy[0], max_busy[1], max_busy[2], 107 rps->pm_interval); 108 109 if (100 * busy > rps->power.up_threshold * dt && 110 rps->cur_freq < rps->max_freq_softlimit) { 111 rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; 112 rps->pm_interval = 1; 113 schedule_work(&rps->work); 114 } else if (100 * busy < rps->power.down_threshold * dt && 115 rps->cur_freq > rps->min_freq_softlimit) { 116 rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; 117 rps->pm_interval = 1; 118 schedule_work(&rps->work); 119 } else { 120 rps->last_adj = 0; 121 } 122 123 mod_timer(&rps->timer, 124 jiffies + msecs_to_jiffies(rps->pm_interval)); 125 rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI); 126 } 127 } 128 129 static void rps_start_timer(struct intel_rps *rps) 130 { 131 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 132 rps->pm_interval = 1; 133 mod_timer(&rps->timer, jiffies + 1); 134 } 135 136 static void rps_stop_timer(struct intel_rps *rps) 137 { 138 del_timer_sync(&rps->timer); 139 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 140 cancel_work_sync(&rps->work); 141 } 142 143 static u32 rps_pm_mask(struct intel_rps *rps, u8 val) 144 { 145 u32 mask = 0; 146 147 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ 148 if (val > rps->min_freq_softlimit) 149 mask |= (GEN6_PM_RP_UP_EI_EXPIRED | 150 GEN6_PM_RP_DOWN_THRESHOLD | 151 GEN6_PM_RP_DOWN_TIMEOUT); 152 153 if (val < rps->max_freq_softlimit) 154 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 155 156 mask &= rps->pm_events; 157 158 return rps_pm_sanitize_mask(rps, ~mask); 159 } 160 161 static void rps_reset_ei(struct intel_rps *rps) 162 { 163 memset(&rps->ei, 0, sizeof(rps->ei)); 164 } 165 166 static void rps_enable_interrupts(struct intel_rps *rps) 167 { 168 struct intel_gt *gt = rps_to_gt(rps); 169 170 GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n", 171 rps->pm_events, rps_pm_mask(rps, rps->last_freq)); 172 173 rps_reset_ei(rps); 174 175 spin_lock_irq(>->irq_lock); 176 gen6_gt_pm_enable_irq(gt, rps->pm_events); 177 spin_unlock_irq(>->irq_lock); 178 179 intel_uncore_write(gt->uncore, 180 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); 181 } 182 183 static void gen6_rps_reset_interrupts(struct intel_rps *rps) 184 { 185 gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); 186 } 187 188 static void gen11_rps_reset_interrupts(struct intel_rps *rps) 189 { 190 while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) 191 ; 192 } 193 194 static void rps_reset_interrupts(struct intel_rps *rps) 195 { 196 struct intel_gt *gt = rps_to_gt(rps); 197 198 spin_lock_irq(>->irq_lock); 199 if (INTEL_GEN(gt->i915) >= 11) 200 gen11_rps_reset_interrupts(rps); 201 else 202 gen6_rps_reset_interrupts(rps); 203 204 rps->pm_iir = 0; 205 spin_unlock_irq(>->irq_lock); 206 } 207 208 static void rps_disable_interrupts(struct intel_rps *rps) 209 { 210 struct intel_gt *gt = rps_to_gt(rps); 211 212 intel_uncore_write(gt->uncore, 213 GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); 214 215 spin_lock_irq(>->irq_lock); 216 gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); 217 spin_unlock_irq(>->irq_lock); 218 219 intel_synchronize_irq(gt->i915); 220 221 /* 222 * Now that we will not be generating any more work, flush any 223 * outstanding tasks. As we are called on the RPS idle path, 224 * we will reset the GPU to minimum frequencies, so the current 225 * state of the worker can be discarded. 226 */ 227 cancel_work_sync(&rps->work); 228 229 rps_reset_interrupts(rps); 230 GT_TRACE(gt, "interrupts:off\n"); 231 } 232 233 static const struct cparams { 234 u16 i; 235 u16 t; 236 u16 m; 237 u16 c; 238 } cparams[] = { 239 { 1, 1333, 301, 28664 }, 240 { 1, 1066, 294, 24460 }, 241 { 1, 800, 294, 25192 }, 242 { 0, 1333, 276, 27605 }, 243 { 0, 1066, 276, 27605 }, 244 { 0, 800, 231, 23784 }, 245 }; 246 247 static void gen5_rps_init(struct intel_rps *rps) 248 { 249 struct drm_i915_private *i915 = rps_to_i915(rps); 250 struct intel_uncore *uncore = rps_to_uncore(rps); 251 u8 fmax, fmin, fstart; 252 u32 rgvmodectl; 253 int c_m, i; 254 255 if (i915->fsb_freq <= 3200) 256 c_m = 0; 257 else if (i915->fsb_freq <= 4800) 258 c_m = 1; 259 else 260 c_m = 2; 261 262 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 263 if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { 264 rps->ips.m = cparams[i].m; 265 rps->ips.c = cparams[i].c; 266 break; 267 } 268 } 269 270 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 271 272 /* Set up min, max, and cur for interrupt handling */ 273 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 274 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 275 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 276 MEMMODE_FSTART_SHIFT; 277 drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n", 278 fmax, fmin, fstart); 279 280 rps->min_freq = fmax; 281 rps->efficient_freq = fstart; 282 rps->max_freq = fmin; 283 } 284 285 static unsigned long 286 __ips_chipset_val(struct intel_ips *ips) 287 { 288 struct intel_uncore *uncore = 289 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 290 unsigned long now = jiffies_to_msecs(jiffies), dt; 291 unsigned long result; 292 u64 total, delta; 293 294 lockdep_assert_held(&mchdev_lock); 295 296 /* 297 * Prevent division-by-zero if we are asking too fast. 298 * Also, we don't get interesting results if we are polling 299 * faster than once in 10ms, so just return the saved value 300 * in such cases. 301 */ 302 dt = now - ips->last_time1; 303 if (dt <= 10) 304 return ips->chipset_power; 305 306 /* FIXME: handle per-counter overflow */ 307 total = intel_uncore_read(uncore, DMIEC); 308 total += intel_uncore_read(uncore, DDREC); 309 total += intel_uncore_read(uncore, CSIEC); 310 311 delta = total - ips->last_count1; 312 313 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); 314 315 ips->last_count1 = total; 316 ips->last_time1 = now; 317 318 ips->chipset_power = result; 319 320 return result; 321 } 322 323 static unsigned long ips_mch_val(struct intel_uncore *uncore) 324 { 325 unsigned int m, x, b; 326 u32 tsfs; 327 328 tsfs = intel_uncore_read(uncore, TSFS); 329 x = intel_uncore_read8(uncore, TR1); 330 331 b = tsfs & TSFS_INTR_MASK; 332 m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; 333 334 return m * x / 127 - b; 335 } 336 337 static int _pxvid_to_vd(u8 pxvid) 338 { 339 if (pxvid == 0) 340 return 0; 341 342 if (pxvid >= 8 && pxvid < 31) 343 pxvid = 31; 344 345 return (pxvid + 2) * 125; 346 } 347 348 static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) 349 { 350 const int vd = _pxvid_to_vd(pxvid); 351 352 if (INTEL_INFO(i915)->is_mobile) 353 return max(vd - 1125, 0); 354 355 return vd; 356 } 357 358 static void __gen5_ips_update(struct intel_ips *ips) 359 { 360 struct intel_uncore *uncore = 361 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 362 u64 now, delta, dt; 363 u32 count; 364 365 lockdep_assert_held(&mchdev_lock); 366 367 now = ktime_get_raw_ns(); 368 dt = now - ips->last_time2; 369 do_div(dt, NSEC_PER_MSEC); 370 371 /* Don't divide by 0 */ 372 if (dt <= 10) 373 return; 374 375 count = intel_uncore_read(uncore, GFXEC); 376 delta = count - ips->last_count2; 377 378 ips->last_count2 = count; 379 ips->last_time2 = now; 380 381 /* More magic constants... */ 382 ips->gfx_power = div_u64(delta * 1181, dt * 10); 383 } 384 385 static void gen5_rps_update(struct intel_rps *rps) 386 { 387 spin_lock_irq(&mchdev_lock); 388 __gen5_ips_update(&rps->ips); 389 spin_unlock_irq(&mchdev_lock); 390 } 391 392 static bool gen5_rps_set(struct intel_rps *rps, u8 val) 393 { 394 struct intel_uncore *uncore = rps_to_uncore(rps); 395 u16 rgvswctl; 396 397 lockdep_assert_held(&mchdev_lock); 398 399 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 400 if (rgvswctl & MEMCTL_CMD_STS) { 401 DRM_DEBUG("gpu busy, RCS change rejected\n"); 402 return false; /* still busy with another command */ 403 } 404 405 /* Invert the frequency bin into an ips delay */ 406 val = rps->max_freq - val; 407 val = rps->min_freq + val; 408 409 rgvswctl = 410 (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 411 (val << MEMCTL_FREQ_SHIFT) | 412 MEMCTL_SFCAVM; 413 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 414 intel_uncore_posting_read16(uncore, MEMSWCTL); 415 416 rgvswctl |= MEMCTL_CMD_STS; 417 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 418 419 return true; 420 } 421 422 static unsigned long intel_pxfreq(u32 vidfreq) 423 { 424 int div = (vidfreq & 0x3f0000) >> 16; 425 int post = (vidfreq & 0x3000) >> 12; 426 int pre = (vidfreq & 0x7); 427 428 if (!pre) 429 return 0; 430 431 return div * 133333 / (pre << post); 432 } 433 434 static unsigned int init_emon(struct intel_uncore *uncore) 435 { 436 u8 pxw[16]; 437 int i; 438 439 /* Disable to program */ 440 intel_uncore_write(uncore, ECR, 0); 441 intel_uncore_posting_read(uncore, ECR); 442 443 /* Program energy weights for various events */ 444 intel_uncore_write(uncore, SDEW, 0x15040d00); 445 intel_uncore_write(uncore, CSIEW0, 0x007f0000); 446 intel_uncore_write(uncore, CSIEW1, 0x1e220004); 447 intel_uncore_write(uncore, CSIEW2, 0x04000004); 448 449 for (i = 0; i < 5; i++) 450 intel_uncore_write(uncore, PEW(i), 0); 451 for (i = 0; i < 3; i++) 452 intel_uncore_write(uncore, DEW(i), 0); 453 454 /* Program P-state weights to account for frequency power adjustment */ 455 for (i = 0; i < 16; i++) { 456 u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); 457 unsigned int freq = intel_pxfreq(pxvidfreq); 458 unsigned int vid = 459 (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 460 unsigned int val; 461 462 val = vid * vid * freq / 1000 * 255; 463 val /= 127 * 127 * 900; 464 465 pxw[i] = val; 466 } 467 /* Render standby states get 0 weight */ 468 pxw[14] = 0; 469 pxw[15] = 0; 470 471 for (i = 0; i < 4; i++) { 472 intel_uncore_write(uncore, PXW(i), 473 pxw[i * 4 + 0] << 24 | 474 pxw[i * 4 + 1] << 16 | 475 pxw[i * 4 + 2] << 8 | 476 pxw[i * 4 + 3] << 0); 477 } 478 479 /* Adjust magic regs to magic values (more experimental results) */ 480 intel_uncore_write(uncore, OGW0, 0); 481 intel_uncore_write(uncore, OGW1, 0); 482 intel_uncore_write(uncore, EG0, 0x00007f00); 483 intel_uncore_write(uncore, EG1, 0x0000000e); 484 intel_uncore_write(uncore, EG2, 0x000e0000); 485 intel_uncore_write(uncore, EG3, 0x68000300); 486 intel_uncore_write(uncore, EG4, 0x42000000); 487 intel_uncore_write(uncore, EG5, 0x00140031); 488 intel_uncore_write(uncore, EG6, 0); 489 intel_uncore_write(uncore, EG7, 0); 490 491 for (i = 0; i < 8; i++) 492 intel_uncore_write(uncore, PXWL(i), 0); 493 494 /* Enable PMON + select events */ 495 intel_uncore_write(uncore, ECR, 0x80000019); 496 497 return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; 498 } 499 500 static bool gen5_rps_enable(struct intel_rps *rps) 501 { 502 struct intel_uncore *uncore = rps_to_uncore(rps); 503 u8 fstart, vstart; 504 u32 rgvmodectl; 505 506 spin_lock_irq(&mchdev_lock); 507 508 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 509 510 /* Enable temp reporting */ 511 intel_uncore_write16(uncore, PMMISC, 512 intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); 513 intel_uncore_write16(uncore, TSC1, 514 intel_uncore_read16(uncore, TSC1) | TSE); 515 516 /* 100ms RC evaluation intervals */ 517 intel_uncore_write(uncore, RCUPEI, 100000); 518 intel_uncore_write(uncore, RCDNEI, 100000); 519 520 /* Set max/min thresholds to 90ms and 80ms respectively */ 521 intel_uncore_write(uncore, RCBMAXAVG, 90000); 522 intel_uncore_write(uncore, RCBMINAVG, 80000); 523 524 intel_uncore_write(uncore, MEMIHYST, 1); 525 526 /* Set up min, max, and cur for interrupt handling */ 527 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 528 MEMMODE_FSTART_SHIFT; 529 530 vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & 531 PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 532 533 intel_uncore_write(uncore, 534 MEMINTREN, 535 MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 536 537 intel_uncore_write(uncore, VIDSTART, vstart); 538 intel_uncore_posting_read(uncore, VIDSTART); 539 540 rgvmodectl |= MEMMODE_SWMODE_EN; 541 intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); 542 543 if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & 544 MEMCTL_CMD_STS) == 0, 10)) 545 drm_err(&uncore->i915->drm, 546 "stuck trying to change perf mode\n"); 547 mdelay(1); 548 549 gen5_rps_set(rps, rps->cur_freq); 550 551 rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); 552 rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); 553 rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); 554 rps->ips.last_time1 = jiffies_to_msecs(jiffies); 555 556 rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); 557 rps->ips.last_time2 = ktime_get_raw_ns(); 558 559 spin_unlock_irq(&mchdev_lock); 560 561 rps->ips.corr = init_emon(uncore); 562 563 return true; 564 } 565 566 static void gen5_rps_disable(struct intel_rps *rps) 567 { 568 struct intel_uncore *uncore = rps_to_uncore(rps); 569 u16 rgvswctl; 570 571 spin_lock_irq(&mchdev_lock); 572 573 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 574 575 /* Ack interrupts, disable EFC interrupt */ 576 intel_uncore_write(uncore, MEMINTREN, 577 intel_uncore_read(uncore, MEMINTREN) & 578 ~MEMINT_EVAL_CHG_EN); 579 intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 580 intel_uncore_write(uncore, DEIER, 581 intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); 582 intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); 583 intel_uncore_write(uncore, DEIMR, 584 intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); 585 586 /* Go back to the starting frequency */ 587 gen5_rps_set(rps, rps->idle_freq); 588 mdelay(1); 589 rgvswctl |= MEMCTL_CMD_STS; 590 intel_uncore_write(uncore, MEMSWCTL, rgvswctl); 591 mdelay(1); 592 593 spin_unlock_irq(&mchdev_lock); 594 } 595 596 static u32 rps_limits(struct intel_rps *rps, u8 val) 597 { 598 u32 limits; 599 600 /* 601 * Only set the down limit when we've reached the lowest level to avoid 602 * getting more interrupts, otherwise leave this clear. This prevents a 603 * race in the hw when coming out of rc6: There's a tiny window where 604 * the hw runs at the minimal clock before selecting the desired 605 * frequency, if the down threshold expires in that window we will not 606 * receive a down interrupt. 607 */ 608 if (INTEL_GEN(rps_to_i915(rps)) >= 9) { 609 limits = rps->max_freq_softlimit << 23; 610 if (val <= rps->min_freq_softlimit) 611 limits |= rps->min_freq_softlimit << 14; 612 } else { 613 limits = rps->max_freq_softlimit << 24; 614 if (val <= rps->min_freq_softlimit) 615 limits |= rps->min_freq_softlimit << 16; 616 } 617 618 return limits; 619 } 620 621 static void rps_set_power(struct intel_rps *rps, int new_power) 622 { 623 struct intel_gt *gt = rps_to_gt(rps); 624 struct intel_uncore *uncore = gt->uncore; 625 u32 threshold_up = 0, threshold_down = 0; /* in % */ 626 u32 ei_up = 0, ei_down = 0; 627 628 lockdep_assert_held(&rps->power.mutex); 629 630 if (new_power == rps->power.mode) 631 return; 632 633 threshold_up = 95; 634 threshold_down = 85; 635 636 /* Note the units here are not exactly 1us, but 1280ns. */ 637 switch (new_power) { 638 case LOW_POWER: 639 ei_up = 16000; 640 ei_down = 32000; 641 break; 642 643 case BETWEEN: 644 ei_up = 13000; 645 ei_down = 32000; 646 break; 647 648 case HIGH_POWER: 649 ei_up = 10000; 650 ei_down = 32000; 651 break; 652 } 653 654 /* When byt can survive without system hang with dynamic 655 * sw freq adjustments, this restriction can be lifted. 656 */ 657 if (IS_VALLEYVIEW(gt->i915)) 658 goto skip_hw_write; 659 660 GT_TRACE(gt, 661 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", 662 new_power, threshold_up, ei_up, threshold_down, ei_down); 663 664 set(uncore, GEN6_RP_UP_EI, 665 intel_gt_ns_to_pm_interval(gt, ei_up * 1000)); 666 set(uncore, GEN6_RP_UP_THRESHOLD, 667 intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10)); 668 669 set(uncore, GEN6_RP_DOWN_EI, 670 intel_gt_ns_to_pm_interval(gt, ei_down * 1000)); 671 set(uncore, GEN6_RP_DOWN_THRESHOLD, 672 intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10)); 673 674 set(uncore, GEN6_RP_CONTROL, 675 (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | 676 GEN6_RP_MEDIA_HW_NORMAL_MODE | 677 GEN6_RP_MEDIA_IS_GFX | 678 GEN6_RP_ENABLE | 679 GEN6_RP_UP_BUSY_AVG | 680 GEN6_RP_DOWN_IDLE_AVG); 681 682 skip_hw_write: 683 rps->power.mode = new_power; 684 rps->power.up_threshold = threshold_up; 685 rps->power.down_threshold = threshold_down; 686 } 687 688 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) 689 { 690 int new_power; 691 692 new_power = rps->power.mode; 693 switch (rps->power.mode) { 694 case LOW_POWER: 695 if (val > rps->efficient_freq + 1 && 696 val > rps->cur_freq) 697 new_power = BETWEEN; 698 break; 699 700 case BETWEEN: 701 if (val <= rps->efficient_freq && 702 val < rps->cur_freq) 703 new_power = LOW_POWER; 704 else if (val >= rps->rp0_freq && 705 val > rps->cur_freq) 706 new_power = HIGH_POWER; 707 break; 708 709 case HIGH_POWER: 710 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && 711 val < rps->cur_freq) 712 new_power = BETWEEN; 713 break; 714 } 715 /* Max/min bins are special */ 716 if (val <= rps->min_freq_softlimit) 717 new_power = LOW_POWER; 718 if (val >= rps->max_freq_softlimit) 719 new_power = HIGH_POWER; 720 721 mutex_lock(&rps->power.mutex); 722 if (rps->power.interactive) 723 new_power = HIGH_POWER; 724 rps_set_power(rps, new_power); 725 mutex_unlock(&rps->power.mutex); 726 } 727 728 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) 729 { 730 GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive)); 731 732 mutex_lock(&rps->power.mutex); 733 if (interactive) { 734 if (!rps->power.interactive++ && intel_rps_is_active(rps)) 735 rps_set_power(rps, HIGH_POWER); 736 } else { 737 GEM_BUG_ON(!rps->power.interactive); 738 rps->power.interactive--; 739 } 740 mutex_unlock(&rps->power.mutex); 741 } 742 743 static int gen6_rps_set(struct intel_rps *rps, u8 val) 744 { 745 struct intel_uncore *uncore = rps_to_uncore(rps); 746 struct drm_i915_private *i915 = rps_to_i915(rps); 747 u32 swreq; 748 749 if (INTEL_GEN(i915) >= 9) 750 swreq = GEN9_FREQUENCY(val); 751 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 752 swreq = HSW_FREQUENCY(val); 753 else 754 swreq = (GEN6_FREQUENCY(val) | 755 GEN6_OFFSET(0) | 756 GEN6_AGGRESSIVE_TURBO); 757 set(uncore, GEN6_RPNSWREQ, swreq); 758 759 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n", 760 val, intel_gpu_freq(rps, val), swreq); 761 762 return 0; 763 } 764 765 static int vlv_rps_set(struct intel_rps *rps, u8 val) 766 { 767 struct drm_i915_private *i915 = rps_to_i915(rps); 768 int err; 769 770 vlv_punit_get(i915); 771 err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); 772 vlv_punit_put(i915); 773 774 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n", 775 val, intel_gpu_freq(rps, val)); 776 777 return err; 778 } 779 780 static int rps_set(struct intel_rps *rps, u8 val, bool update) 781 { 782 struct drm_i915_private *i915 = rps_to_i915(rps); 783 int err; 784 785 if (INTEL_GEN(i915) < 6) 786 return 0; 787 788 if (val == rps->last_freq) 789 return 0; 790 791 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 792 err = vlv_rps_set(rps, val); 793 else 794 err = gen6_rps_set(rps, val); 795 if (err) 796 return err; 797 798 if (update) 799 gen6_rps_set_thresholds(rps, val); 800 rps->last_freq = val; 801 802 return 0; 803 } 804 805 void intel_rps_unpark(struct intel_rps *rps) 806 { 807 if (!intel_rps_is_enabled(rps)) 808 return; 809 810 GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); 811 812 /* 813 * Use the user's desired frequency as a guide, but for better 814 * performance, jump directly to RPe as our starting frequency. 815 */ 816 mutex_lock(&rps->lock); 817 818 intel_rps_set_active(rps); 819 intel_rps_set(rps, 820 clamp(rps->cur_freq, 821 rps->min_freq_softlimit, 822 rps->max_freq_softlimit)); 823 824 mutex_unlock(&rps->lock); 825 826 rps->pm_iir = 0; 827 if (intel_rps_has_interrupts(rps)) 828 rps_enable_interrupts(rps); 829 if (intel_rps_uses_timer(rps)) 830 rps_start_timer(rps); 831 832 if (IS_GEN(rps_to_i915(rps), 5)) 833 gen5_rps_update(rps); 834 } 835 836 void intel_rps_park(struct intel_rps *rps) 837 { 838 int adj; 839 840 if (!intel_rps_clear_active(rps)) 841 return; 842 843 if (intel_rps_uses_timer(rps)) 844 rps_stop_timer(rps); 845 if (intel_rps_has_interrupts(rps)) 846 rps_disable_interrupts(rps); 847 848 if (rps->last_freq <= rps->idle_freq) 849 return; 850 851 /* 852 * The punit delays the write of the frequency and voltage until it 853 * determines the GPU is awake. During normal usage we don't want to 854 * waste power changing the frequency if the GPU is sleeping (rc6). 855 * However, the GPU and driver is now idle and we do not want to delay 856 * switching to minimum voltage (reducing power whilst idle) as we do 857 * not expect to be woken in the near future and so must flush the 858 * change by waking the device. 859 * 860 * We choose to take the media powerwell (either would do to trick the 861 * punit into committing the voltage change) as that takes a lot less 862 * power than the render powerwell. 863 */ 864 intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); 865 rps_set(rps, rps->idle_freq, false); 866 intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); 867 868 /* 869 * Since we will try and restart from the previously requested 870 * frequency on unparking, treat this idle point as a downclock 871 * interrupt and reduce the frequency for resume. If we park/unpark 872 * more frequently than the rps worker can run, we will not respond 873 * to any EI and never see a change in frequency. 874 * 875 * (Note we accommodate Cherryview's limitation of only using an 876 * even bin by applying it to all.) 877 */ 878 adj = rps->last_adj; 879 if (adj < 0) 880 adj *= 2; 881 else /* CHV needs even encode values */ 882 adj = -2; 883 rps->last_adj = adj; 884 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); 885 886 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); 887 } 888 889 void intel_rps_boost(struct i915_request *rq) 890 { 891 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; 892 unsigned long flags; 893 894 if (i915_request_signaled(rq) || !intel_rps_is_active(rps)) 895 return; 896 897 /* Serializes with i915_request_retire() */ 898 spin_lock_irqsave(&rq->lock, flags); 899 if (!i915_request_has_waitboost(rq) && 900 !dma_fence_is_signaled_locked(&rq->fence)) { 901 set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); 902 903 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", 904 rq->fence.context, rq->fence.seqno); 905 906 if (!atomic_fetch_inc(&rps->num_waiters) && 907 READ_ONCE(rps->cur_freq) < rps->boost_freq) 908 schedule_work(&rps->work); 909 910 atomic_inc(&rps->boosts); 911 } 912 spin_unlock_irqrestore(&rq->lock, flags); 913 } 914 915 int intel_rps_set(struct intel_rps *rps, u8 val) 916 { 917 int err; 918 919 lockdep_assert_held(&rps->lock); 920 GEM_BUG_ON(val > rps->max_freq); 921 GEM_BUG_ON(val < rps->min_freq); 922 923 if (intel_rps_is_active(rps)) { 924 err = rps_set(rps, val, true); 925 if (err) 926 return err; 927 928 /* 929 * Make sure we continue to get interrupts 930 * until we hit the minimum or maximum frequencies. 931 */ 932 if (intel_rps_has_interrupts(rps)) { 933 struct intel_uncore *uncore = rps_to_uncore(rps); 934 935 set(uncore, 936 GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val)); 937 938 set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val)); 939 } 940 } 941 942 rps->cur_freq = val; 943 return 0; 944 } 945 946 static void gen6_rps_init(struct intel_rps *rps) 947 { 948 struct drm_i915_private *i915 = rps_to_i915(rps); 949 struct intel_uncore *uncore = rps_to_uncore(rps); 950 951 /* All of these values are in units of 50MHz */ 952 953 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 954 if (IS_GEN9_LP(i915)) { 955 u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); 956 957 rps->rp0_freq = (rp_state_cap >> 16) & 0xff; 958 rps->rp1_freq = (rp_state_cap >> 8) & 0xff; 959 rps->min_freq = (rp_state_cap >> 0) & 0xff; 960 } else { 961 u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); 962 963 rps->rp0_freq = (rp_state_cap >> 0) & 0xff; 964 rps->rp1_freq = (rp_state_cap >> 8) & 0xff; 965 rps->min_freq = (rp_state_cap >> 16) & 0xff; 966 } 967 968 /* hw_max = RP0 until we check for overclocking */ 969 rps->max_freq = rps->rp0_freq; 970 971 rps->efficient_freq = rps->rp1_freq; 972 if (IS_HASWELL(i915) || IS_BROADWELL(i915) || 973 IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { 974 u32 ddcc_status = 0; 975 976 if (sandybridge_pcode_read(i915, 977 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 978 &ddcc_status, NULL) == 0) 979 rps->efficient_freq = 980 clamp_t(u8, 981 (ddcc_status >> 8) & 0xff, 982 rps->min_freq, 983 rps->max_freq); 984 } 985 986 if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { 987 /* Store the frequency values in 16.66 MHZ units, which is 988 * the natural hardware unit for SKL 989 */ 990 rps->rp0_freq *= GEN9_FREQ_SCALER; 991 rps->rp1_freq *= GEN9_FREQ_SCALER; 992 rps->min_freq *= GEN9_FREQ_SCALER; 993 rps->max_freq *= GEN9_FREQ_SCALER; 994 rps->efficient_freq *= GEN9_FREQ_SCALER; 995 } 996 } 997 998 static bool rps_reset(struct intel_rps *rps) 999 { 1000 struct drm_i915_private *i915 = rps_to_i915(rps); 1001 1002 /* force a reset */ 1003 rps->power.mode = -1; 1004 rps->last_freq = -1; 1005 1006 if (rps_set(rps, rps->min_freq, true)) { 1007 drm_err(&i915->drm, "Failed to reset RPS to initial values\n"); 1008 return false; 1009 } 1010 1011 rps->cur_freq = rps->min_freq; 1012 return true; 1013 } 1014 1015 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 1016 static bool gen9_rps_enable(struct intel_rps *rps) 1017 { 1018 struct intel_gt *gt = rps_to_gt(rps); 1019 struct intel_uncore *uncore = gt->uncore; 1020 1021 /* Program defaults and thresholds for RPS */ 1022 if (IS_GEN(gt->i915, 9)) 1023 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1024 GEN9_FREQUENCY(rps->rp1_freq)); 1025 1026 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); 1027 1028 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1029 1030 return rps_reset(rps); 1031 } 1032 1033 static bool gen8_rps_enable(struct intel_rps *rps) 1034 { 1035 struct intel_uncore *uncore = rps_to_uncore(rps); 1036 1037 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1038 HSW_FREQUENCY(rps->rp1_freq)); 1039 1040 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1041 1042 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1043 1044 return rps_reset(rps); 1045 } 1046 1047 static bool gen6_rps_enable(struct intel_rps *rps) 1048 { 1049 struct intel_uncore *uncore = rps_to_uncore(rps); 1050 1051 /* Power down if completely idle for over 50ms */ 1052 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); 1053 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1054 1055 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1056 GEN6_PM_RP_DOWN_THRESHOLD | 1057 GEN6_PM_RP_DOWN_TIMEOUT); 1058 1059 return rps_reset(rps); 1060 } 1061 1062 static int chv_rps_max_freq(struct intel_rps *rps) 1063 { 1064 struct drm_i915_private *i915 = rps_to_i915(rps); 1065 struct intel_gt *gt = rps_to_gt(rps); 1066 u32 val; 1067 1068 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1069 1070 switch (gt->info.sseu.eu_total) { 1071 case 8: 1072 /* (2 * 4) config */ 1073 val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; 1074 break; 1075 case 12: 1076 /* (2 * 6) config */ 1077 val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; 1078 break; 1079 case 16: 1080 /* (2 * 8) config */ 1081 default: 1082 /* Setting (2 * 8) Min RP0 for any other combination */ 1083 val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; 1084 break; 1085 } 1086 1087 return val & FB_GFX_FREQ_FUSE_MASK; 1088 } 1089 1090 static int chv_rps_rpe_freq(struct intel_rps *rps) 1091 { 1092 struct drm_i915_private *i915 = rps_to_i915(rps); 1093 u32 val; 1094 1095 val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); 1096 val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; 1097 1098 return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 1099 } 1100 1101 static int chv_rps_guar_freq(struct intel_rps *rps) 1102 { 1103 struct drm_i915_private *i915 = rps_to_i915(rps); 1104 u32 val; 1105 1106 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1107 1108 return val & FB_GFX_FREQ_FUSE_MASK; 1109 } 1110 1111 static u32 chv_rps_min_freq(struct intel_rps *rps) 1112 { 1113 struct drm_i915_private *i915 = rps_to_i915(rps); 1114 u32 val; 1115 1116 val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); 1117 val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; 1118 1119 return val & FB_GFX_FREQ_FUSE_MASK; 1120 } 1121 1122 static bool chv_rps_enable(struct intel_rps *rps) 1123 { 1124 struct intel_uncore *uncore = rps_to_uncore(rps); 1125 struct drm_i915_private *i915 = rps_to_i915(rps); 1126 u32 val; 1127 1128 /* 1: Program defaults and thresholds for RPS*/ 1129 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1130 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1131 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1132 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1133 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1134 1135 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1136 1137 /* 2: Enable RPS */ 1138 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1139 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1140 GEN6_RP_MEDIA_IS_GFX | 1141 GEN6_RP_ENABLE | 1142 GEN6_RP_UP_BUSY_AVG | 1143 GEN6_RP_DOWN_IDLE_AVG); 1144 1145 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1146 GEN6_PM_RP_DOWN_THRESHOLD | 1147 GEN6_PM_RP_DOWN_TIMEOUT); 1148 1149 /* Setting Fixed Bias */ 1150 vlv_punit_get(i915); 1151 1152 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; 1153 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1154 1155 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1156 1157 vlv_punit_put(i915); 1158 1159 /* RPS code assumes GPLL is used */ 1160 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1161 "GPLL not enabled\n"); 1162 1163 drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); 1164 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1165 1166 return rps_reset(rps); 1167 } 1168 1169 static int vlv_rps_guar_freq(struct intel_rps *rps) 1170 { 1171 struct drm_i915_private *i915 = rps_to_i915(rps); 1172 u32 val, rp1; 1173 1174 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1175 1176 rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; 1177 rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 1178 1179 return rp1; 1180 } 1181 1182 static int vlv_rps_max_freq(struct intel_rps *rps) 1183 { 1184 struct drm_i915_private *i915 = rps_to_i915(rps); 1185 u32 val, rp0; 1186 1187 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1188 1189 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 1190 /* Clamp to max */ 1191 rp0 = min_t(u32, rp0, 0xea); 1192 1193 return rp0; 1194 } 1195 1196 static int vlv_rps_rpe_freq(struct intel_rps *rps) 1197 { 1198 struct drm_i915_private *i915 = rps_to_i915(rps); 1199 u32 val, rpe; 1200 1201 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 1202 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 1203 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 1204 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 1205 1206 return rpe; 1207 } 1208 1209 static int vlv_rps_min_freq(struct intel_rps *rps) 1210 { 1211 struct drm_i915_private *i915 = rps_to_i915(rps); 1212 u32 val; 1213 1214 val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; 1215 /* 1216 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 1217 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 1218 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 1219 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 1220 * to make sure it matches what Punit accepts. 1221 */ 1222 return max_t(u32, val, 0xc0); 1223 } 1224 1225 static bool vlv_rps_enable(struct intel_rps *rps) 1226 { 1227 struct intel_uncore *uncore = rps_to_uncore(rps); 1228 struct drm_i915_private *i915 = rps_to_i915(rps); 1229 u32 val; 1230 1231 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1232 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1233 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1234 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1235 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1236 1237 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1238 1239 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1240 GEN6_RP_MEDIA_TURBO | 1241 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1242 GEN6_RP_MEDIA_IS_GFX | 1243 GEN6_RP_ENABLE | 1244 GEN6_RP_UP_BUSY_AVG | 1245 GEN6_RP_DOWN_IDLE_CONT); 1246 1247 /* WaGsvRC0ResidencyMethod:vlv */ 1248 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; 1249 1250 vlv_punit_get(i915); 1251 1252 /* Setting Fixed Bias */ 1253 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; 1254 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1255 1256 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1257 1258 vlv_punit_put(i915); 1259 1260 /* RPS code assumes GPLL is used */ 1261 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1262 "GPLL not enabled\n"); 1263 1264 drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); 1265 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1266 1267 return rps_reset(rps); 1268 } 1269 1270 static unsigned long __ips_gfx_val(struct intel_ips *ips) 1271 { 1272 struct intel_rps *rps = container_of(ips, typeof(*rps), ips); 1273 struct intel_uncore *uncore = rps_to_uncore(rps); 1274 unsigned long t, corr, state1, corr2, state2; 1275 u32 pxvid, ext_v; 1276 1277 lockdep_assert_held(&mchdev_lock); 1278 1279 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); 1280 pxvid = (pxvid >> 24) & 0x7f; 1281 ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); 1282 1283 state1 = ext_v; 1284 1285 /* Revel in the empirically derived constants */ 1286 1287 /* Correction factor in 1/100000 units */ 1288 t = ips_mch_val(uncore); 1289 if (t > 80) 1290 corr = t * 2349 + 135940; 1291 else if (t >= 50) 1292 corr = t * 964 + 29317; 1293 else /* < 50 */ 1294 corr = t * 301 + 1004; 1295 1296 corr = corr * 150142 * state1 / 10000 - 78642; 1297 corr /= 100000; 1298 corr2 = corr * ips->corr; 1299 1300 state2 = corr2 * state1 / 10000; 1301 state2 /= 100; /* convert to mW */ 1302 1303 __gen5_ips_update(ips); 1304 1305 return ips->gfx_power + state2; 1306 } 1307 1308 static bool has_busy_stats(struct intel_rps *rps) 1309 { 1310 struct intel_engine_cs *engine; 1311 enum intel_engine_id id; 1312 1313 for_each_engine(engine, rps_to_gt(rps), id) { 1314 if (!intel_engine_supports_stats(engine)) 1315 return false; 1316 } 1317 1318 return true; 1319 } 1320 1321 void intel_rps_enable(struct intel_rps *rps) 1322 { 1323 struct drm_i915_private *i915 = rps_to_i915(rps); 1324 struct intel_uncore *uncore = rps_to_uncore(rps); 1325 bool enabled = false; 1326 1327 if (!HAS_RPS(i915)) 1328 return; 1329 1330 intel_gt_check_clock_frequency(rps_to_gt(rps)); 1331 1332 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1333 if (rps->max_freq <= rps->min_freq) 1334 /* leave disabled, no room for dynamic reclocking */; 1335 else if (IS_CHERRYVIEW(i915)) 1336 enabled = chv_rps_enable(rps); 1337 else if (IS_VALLEYVIEW(i915)) 1338 enabled = vlv_rps_enable(rps); 1339 else if (INTEL_GEN(i915) >= 9) 1340 enabled = gen9_rps_enable(rps); 1341 else if (INTEL_GEN(i915) >= 8) 1342 enabled = gen8_rps_enable(rps); 1343 else if (INTEL_GEN(i915) >= 6) 1344 enabled = gen6_rps_enable(rps); 1345 else if (IS_IRONLAKE_M(i915)) 1346 enabled = gen5_rps_enable(rps); 1347 else 1348 MISSING_CASE(INTEL_GEN(i915)); 1349 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 1350 if (!enabled) 1351 return; 1352 1353 GT_TRACE(rps_to_gt(rps), 1354 "min:%x, max:%x, freq:[%d, %d]\n", 1355 rps->min_freq, rps->max_freq, 1356 intel_gpu_freq(rps, rps->min_freq), 1357 intel_gpu_freq(rps, rps->max_freq)); 1358 1359 GEM_BUG_ON(rps->max_freq < rps->min_freq); 1360 GEM_BUG_ON(rps->idle_freq > rps->max_freq); 1361 1362 GEM_BUG_ON(rps->efficient_freq < rps->min_freq); 1363 GEM_BUG_ON(rps->efficient_freq > rps->max_freq); 1364 1365 if (has_busy_stats(rps)) 1366 intel_rps_set_timer(rps); 1367 else if (INTEL_GEN(i915) >= 6) 1368 intel_rps_set_interrupts(rps); 1369 else 1370 /* Ironlake currently uses intel_ips.ko */ {} 1371 1372 intel_rps_set_enabled(rps); 1373 } 1374 1375 static void gen6_rps_disable(struct intel_rps *rps) 1376 { 1377 set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); 1378 } 1379 1380 void intel_rps_disable(struct intel_rps *rps) 1381 { 1382 struct drm_i915_private *i915 = rps_to_i915(rps); 1383 1384 intel_rps_clear_enabled(rps); 1385 intel_rps_clear_interrupts(rps); 1386 intel_rps_clear_timer(rps); 1387 1388 if (INTEL_GEN(i915) >= 6) 1389 gen6_rps_disable(rps); 1390 else if (IS_IRONLAKE_M(i915)) 1391 gen5_rps_disable(rps); 1392 } 1393 1394 static int byt_gpu_freq(struct intel_rps *rps, int val) 1395 { 1396 /* 1397 * N = val - 0xb7 1398 * Slow = Fast = GPLL ref * N 1399 */ 1400 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); 1401 } 1402 1403 static int byt_freq_opcode(struct intel_rps *rps, int val) 1404 { 1405 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; 1406 } 1407 1408 static int chv_gpu_freq(struct intel_rps *rps, int val) 1409 { 1410 /* 1411 * N = val / 2 1412 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 1413 */ 1414 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); 1415 } 1416 1417 static int chv_freq_opcode(struct intel_rps *rps, int val) 1418 { 1419 /* CHV needs even values */ 1420 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; 1421 } 1422 1423 int intel_gpu_freq(struct intel_rps *rps, int val) 1424 { 1425 struct drm_i915_private *i915 = rps_to_i915(rps); 1426 1427 if (INTEL_GEN(i915) >= 9) 1428 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 1429 GEN9_FREQ_SCALER); 1430 else if (IS_CHERRYVIEW(i915)) 1431 return chv_gpu_freq(rps, val); 1432 else if (IS_VALLEYVIEW(i915)) 1433 return byt_gpu_freq(rps, val); 1434 else 1435 return val * GT_FREQUENCY_MULTIPLIER; 1436 } 1437 1438 int intel_freq_opcode(struct intel_rps *rps, int val) 1439 { 1440 struct drm_i915_private *i915 = rps_to_i915(rps); 1441 1442 if (INTEL_GEN(i915) >= 9) 1443 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 1444 GT_FREQUENCY_MULTIPLIER); 1445 else if (IS_CHERRYVIEW(i915)) 1446 return chv_freq_opcode(rps, val); 1447 else if (IS_VALLEYVIEW(i915)) 1448 return byt_freq_opcode(rps, val); 1449 else 1450 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 1451 } 1452 1453 static void vlv_init_gpll_ref_freq(struct intel_rps *rps) 1454 { 1455 struct drm_i915_private *i915 = rps_to_i915(rps); 1456 1457 rps->gpll_ref_freq = 1458 vlv_get_cck_clock(i915, "GPLL ref", 1459 CCK_GPLL_CLOCK_CONTROL, 1460 i915->czclk_freq); 1461 1462 drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", 1463 rps->gpll_ref_freq); 1464 } 1465 1466 static void vlv_rps_init(struct intel_rps *rps) 1467 { 1468 struct drm_i915_private *i915 = rps_to_i915(rps); 1469 u32 val; 1470 1471 vlv_iosf_sb_get(i915, 1472 BIT(VLV_IOSF_SB_PUNIT) | 1473 BIT(VLV_IOSF_SB_NC) | 1474 BIT(VLV_IOSF_SB_CCK)); 1475 1476 vlv_init_gpll_ref_freq(rps); 1477 1478 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1479 switch ((val >> 6) & 3) { 1480 case 0: 1481 case 1: 1482 i915->mem_freq = 800; 1483 break; 1484 case 2: 1485 i915->mem_freq = 1066; 1486 break; 1487 case 3: 1488 i915->mem_freq = 1333; 1489 break; 1490 } 1491 drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 1492 1493 rps->max_freq = vlv_rps_max_freq(rps); 1494 rps->rp0_freq = rps->max_freq; 1495 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1496 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1497 1498 rps->efficient_freq = vlv_rps_rpe_freq(rps); 1499 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1500 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1501 1502 rps->rp1_freq = vlv_rps_guar_freq(rps); 1503 drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 1504 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1505 1506 rps->min_freq = vlv_rps_min_freq(rps); 1507 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1508 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1509 1510 vlv_iosf_sb_put(i915, 1511 BIT(VLV_IOSF_SB_PUNIT) | 1512 BIT(VLV_IOSF_SB_NC) | 1513 BIT(VLV_IOSF_SB_CCK)); 1514 } 1515 1516 static void chv_rps_init(struct intel_rps *rps) 1517 { 1518 struct drm_i915_private *i915 = rps_to_i915(rps); 1519 u32 val; 1520 1521 vlv_iosf_sb_get(i915, 1522 BIT(VLV_IOSF_SB_PUNIT) | 1523 BIT(VLV_IOSF_SB_NC) | 1524 BIT(VLV_IOSF_SB_CCK)); 1525 1526 vlv_init_gpll_ref_freq(rps); 1527 1528 val = vlv_cck_read(i915, CCK_FUSE_REG); 1529 1530 switch ((val >> 2) & 0x7) { 1531 case 3: 1532 i915->mem_freq = 2000; 1533 break; 1534 default: 1535 i915->mem_freq = 1600; 1536 break; 1537 } 1538 drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 1539 1540 rps->max_freq = chv_rps_max_freq(rps); 1541 rps->rp0_freq = rps->max_freq; 1542 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1543 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1544 1545 rps->efficient_freq = chv_rps_rpe_freq(rps); 1546 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1547 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1548 1549 rps->rp1_freq = chv_rps_guar_freq(rps); 1550 drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n", 1551 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1552 1553 rps->min_freq = chv_rps_min_freq(rps); 1554 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1555 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1556 1557 vlv_iosf_sb_put(i915, 1558 BIT(VLV_IOSF_SB_PUNIT) | 1559 BIT(VLV_IOSF_SB_NC) | 1560 BIT(VLV_IOSF_SB_CCK)); 1561 1562 drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq | 1563 rps->rp1_freq | rps->min_freq) & 1, 1564 "Odd GPU freq values\n"); 1565 } 1566 1567 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) 1568 { 1569 ei->ktime = ktime_get_raw(); 1570 ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); 1571 ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); 1572 } 1573 1574 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) 1575 { 1576 struct intel_uncore *uncore = rps_to_uncore(rps); 1577 const struct intel_rps_ei *prev = &rps->ei; 1578 struct intel_rps_ei now; 1579 u32 events = 0; 1580 1581 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1582 return 0; 1583 1584 vlv_c0_read(uncore, &now); 1585 1586 if (prev->ktime) { 1587 u64 time, c0; 1588 u32 render, media; 1589 1590 time = ktime_us_delta(now.ktime, prev->ktime); 1591 1592 time *= rps_to_i915(rps)->czclk_freq; 1593 1594 /* Workload can be split between render + media, 1595 * e.g. SwapBuffers being blitted in X after being rendered in 1596 * mesa. To account for this we need to combine both engines 1597 * into our activity counter. 1598 */ 1599 render = now.render_c0 - prev->render_c0; 1600 media = now.media_c0 - prev->media_c0; 1601 c0 = max(render, media); 1602 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1603 1604 if (c0 > time * rps->power.up_threshold) 1605 events = GEN6_PM_RP_UP_THRESHOLD; 1606 else if (c0 < time * rps->power.down_threshold) 1607 events = GEN6_PM_RP_DOWN_THRESHOLD; 1608 } 1609 1610 rps->ei = now; 1611 return events; 1612 } 1613 1614 static void rps_work(struct work_struct *work) 1615 { 1616 struct intel_rps *rps = container_of(work, typeof(*rps), work); 1617 struct intel_gt *gt = rps_to_gt(rps); 1618 struct drm_i915_private *i915 = rps_to_i915(rps); 1619 bool client_boost = false; 1620 int new_freq, adj, min, max; 1621 u32 pm_iir = 0; 1622 1623 spin_lock_irq(>->irq_lock); 1624 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; 1625 client_boost = atomic_read(&rps->num_waiters); 1626 spin_unlock_irq(>->irq_lock); 1627 1628 /* Make sure we didn't queue anything we're not going to process. */ 1629 if (!pm_iir && !client_boost) 1630 goto out; 1631 1632 mutex_lock(&rps->lock); 1633 if (!intel_rps_is_active(rps)) { 1634 mutex_unlock(&rps->lock); 1635 return; 1636 } 1637 1638 pm_iir |= vlv_wa_c0_ei(rps, pm_iir); 1639 1640 adj = rps->last_adj; 1641 new_freq = rps->cur_freq; 1642 min = rps->min_freq_softlimit; 1643 max = rps->max_freq_softlimit; 1644 if (client_boost) 1645 max = rps->max_freq; 1646 1647 GT_TRACE(gt, 1648 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", 1649 pm_iir, yesno(client_boost), 1650 adj, new_freq, min, max); 1651 1652 if (client_boost && new_freq < rps->boost_freq) { 1653 new_freq = rps->boost_freq; 1654 adj = 0; 1655 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1656 if (adj > 0) 1657 adj *= 2; 1658 else /* CHV needs even encode values */ 1659 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; 1660 1661 if (new_freq >= rps->max_freq_softlimit) 1662 adj = 0; 1663 } else if (client_boost) { 1664 adj = 0; 1665 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1666 if (rps->cur_freq > rps->efficient_freq) 1667 new_freq = rps->efficient_freq; 1668 else if (rps->cur_freq > rps->min_freq_softlimit) 1669 new_freq = rps->min_freq_softlimit; 1670 adj = 0; 1671 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1672 if (adj < 0) 1673 adj *= 2; 1674 else /* CHV needs even encode values */ 1675 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; 1676 1677 if (new_freq <= rps->min_freq_softlimit) 1678 adj = 0; 1679 } else { /* unknown event */ 1680 adj = 0; 1681 } 1682 1683 /* 1684 * sysfs frequency limits may have snuck in while 1685 * servicing the interrupt 1686 */ 1687 new_freq += adj; 1688 new_freq = clamp_t(int, new_freq, min, max); 1689 1690 if (intel_rps_set(rps, new_freq)) { 1691 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); 1692 adj = 0; 1693 } 1694 rps->last_adj = adj; 1695 1696 mutex_unlock(&rps->lock); 1697 1698 out: 1699 spin_lock_irq(>->irq_lock); 1700 gen6_gt_pm_unmask_irq(gt, rps->pm_events); 1701 spin_unlock_irq(>->irq_lock); 1702 } 1703 1704 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1705 { 1706 struct intel_gt *gt = rps_to_gt(rps); 1707 const u32 events = rps->pm_events & pm_iir; 1708 1709 lockdep_assert_held(>->irq_lock); 1710 1711 if (unlikely(!events)) 1712 return; 1713 1714 GT_TRACE(gt, "irq events:%x\n", events); 1715 1716 gen6_gt_pm_mask_irq(gt, events); 1717 1718 rps->pm_iir |= events; 1719 schedule_work(&rps->work); 1720 } 1721 1722 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1723 { 1724 struct intel_gt *gt = rps_to_gt(rps); 1725 u32 events; 1726 1727 events = pm_iir & rps->pm_events; 1728 if (events) { 1729 spin_lock(>->irq_lock); 1730 1731 GT_TRACE(gt, "irq events:%x\n", events); 1732 1733 gen6_gt_pm_mask_irq(gt, events); 1734 rps->pm_iir |= events; 1735 1736 schedule_work(&rps->work); 1737 spin_unlock(>->irq_lock); 1738 } 1739 1740 if (INTEL_GEN(gt->i915) >= 8) 1741 return; 1742 1743 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1744 intel_engine_signal_breadcrumbs(gt->engine[VECS0]); 1745 1746 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1747 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1748 } 1749 1750 void gen5_rps_irq_handler(struct intel_rps *rps) 1751 { 1752 struct intel_uncore *uncore = rps_to_uncore(rps); 1753 u32 busy_up, busy_down, max_avg, min_avg; 1754 u8 new_freq; 1755 1756 spin_lock(&mchdev_lock); 1757 1758 intel_uncore_write16(uncore, 1759 MEMINTRSTS, 1760 intel_uncore_read(uncore, MEMINTRSTS)); 1761 1762 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 1763 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); 1764 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); 1765 max_avg = intel_uncore_read(uncore, RCBMAXAVG); 1766 min_avg = intel_uncore_read(uncore, RCBMINAVG); 1767 1768 /* Handle RCS change request from hw */ 1769 new_freq = rps->cur_freq; 1770 if (busy_up > max_avg) 1771 new_freq++; 1772 else if (busy_down < min_avg) 1773 new_freq--; 1774 new_freq = clamp(new_freq, 1775 rps->min_freq_softlimit, 1776 rps->max_freq_softlimit); 1777 1778 if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq)) 1779 rps->cur_freq = new_freq; 1780 1781 spin_unlock(&mchdev_lock); 1782 } 1783 1784 void intel_rps_init_early(struct intel_rps *rps) 1785 { 1786 mutex_init(&rps->lock); 1787 mutex_init(&rps->power.mutex); 1788 1789 INIT_WORK(&rps->work, rps_work); 1790 timer_setup(&rps->timer, rps_timer, 0); 1791 1792 atomic_set(&rps->num_waiters, 0); 1793 } 1794 1795 void intel_rps_init(struct intel_rps *rps) 1796 { 1797 struct drm_i915_private *i915 = rps_to_i915(rps); 1798 1799 if (IS_CHERRYVIEW(i915)) 1800 chv_rps_init(rps); 1801 else if (IS_VALLEYVIEW(i915)) 1802 vlv_rps_init(rps); 1803 else if (INTEL_GEN(i915) >= 6) 1804 gen6_rps_init(rps); 1805 else if (IS_IRONLAKE_M(i915)) 1806 gen5_rps_init(rps); 1807 1808 /* Derive initial user preferences/limits from the hardware limits */ 1809 rps->max_freq_softlimit = rps->max_freq; 1810 rps->min_freq_softlimit = rps->min_freq; 1811 1812 /* After setting max-softlimit, find the overclock max freq */ 1813 if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { 1814 u32 params = 0; 1815 1816 sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, 1817 ¶ms, NULL); 1818 if (params & BIT(31)) { /* OC supported */ 1819 drm_dbg(&i915->drm, 1820 "Overclocking supported, max: %dMHz, overclock: %dMHz\n", 1821 (rps->max_freq & 0xff) * 50, 1822 (params & 0xff) * 50); 1823 rps->max_freq = params & 0xff; 1824 } 1825 } 1826 1827 /* Finally allow us to boost to max by default */ 1828 rps->boost_freq = rps->max_freq; 1829 rps->idle_freq = rps->min_freq; 1830 1831 /* Start in the middle, from here we will autotune based on workload */ 1832 rps->cur_freq = rps->efficient_freq; 1833 1834 rps->pm_intrmsk_mbz = 0; 1835 1836 /* 1837 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 1838 * if GEN6_PM_UP_EI_EXPIRED is masked. 1839 * 1840 * TODO: verify if this can be reproduced on VLV,CHV. 1841 */ 1842 if (INTEL_GEN(i915) <= 7) 1843 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 1844 1845 if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11) 1846 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 1847 } 1848 1849 void intel_rps_sanitize(struct intel_rps *rps) 1850 { 1851 if (INTEL_GEN(rps_to_i915(rps)) >= 6) 1852 rps_disable_interrupts(rps); 1853 } 1854 1855 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) 1856 { 1857 struct drm_i915_private *i915 = rps_to_i915(rps); 1858 u32 cagf; 1859 1860 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 1861 cagf = (rpstat >> 8) & 0xff; 1862 else if (INTEL_GEN(i915) >= 9) 1863 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1864 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 1865 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1866 else 1867 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1868 1869 return cagf; 1870 } 1871 1872 static u32 read_cagf(struct intel_rps *rps) 1873 { 1874 struct drm_i915_private *i915 = rps_to_i915(rps); 1875 u32 freq; 1876 1877 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 1878 vlv_punit_get(i915); 1879 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1880 vlv_punit_put(i915); 1881 } else { 1882 freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1); 1883 } 1884 1885 return intel_rps_get_cagf(rps, freq); 1886 } 1887 1888 u32 intel_rps_read_actual_frequency(struct intel_rps *rps) 1889 { 1890 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 1891 intel_wakeref_t wakeref; 1892 u32 freq = 0; 1893 1894 with_intel_runtime_pm_if_in_use(rpm, wakeref) 1895 freq = intel_gpu_freq(rps, read_cagf(rps)); 1896 1897 return freq; 1898 } 1899 1900 /* External interface for intel_ips.ko */ 1901 1902 static struct drm_i915_private __rcu *ips_mchdev; 1903 1904 /** 1905 * Tells the intel_ips driver that the i915 driver is now loaded, if 1906 * IPS got loaded first. 1907 * 1908 * This awkward dance is so that neither module has to depend on the 1909 * other in order for IPS to do the appropriate communication of 1910 * GPU turbo limits to i915. 1911 */ 1912 static void 1913 ips_ping_for_i915_load(void) 1914 { 1915 void (*link)(void); 1916 1917 link = symbol_get(ips_link_to_i915_driver); 1918 if (link) { 1919 link(); 1920 symbol_put(ips_link_to_i915_driver); 1921 } 1922 } 1923 1924 void intel_rps_driver_register(struct intel_rps *rps) 1925 { 1926 struct intel_gt *gt = rps_to_gt(rps); 1927 1928 /* 1929 * We only register the i915 ips part with intel-ips once everything is 1930 * set up, to avoid intel-ips sneaking in and reading bogus values. 1931 */ 1932 if (IS_GEN(gt->i915, 5)) { 1933 GEM_BUG_ON(ips_mchdev); 1934 rcu_assign_pointer(ips_mchdev, gt->i915); 1935 ips_ping_for_i915_load(); 1936 } 1937 } 1938 1939 void intel_rps_driver_unregister(struct intel_rps *rps) 1940 { 1941 if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps)) 1942 rcu_assign_pointer(ips_mchdev, NULL); 1943 } 1944 1945 static struct drm_i915_private *mchdev_get(void) 1946 { 1947 struct drm_i915_private *i915; 1948 1949 rcu_read_lock(); 1950 i915 = rcu_dereference(ips_mchdev); 1951 if (!kref_get_unless_zero(&i915->drm.ref)) 1952 i915 = NULL; 1953 rcu_read_unlock(); 1954 1955 return i915; 1956 } 1957 1958 /** 1959 * i915_read_mch_val - return value for IPS use 1960 * 1961 * Calculate and return a value for the IPS driver to use when deciding whether 1962 * we have thermal and power headroom to increase CPU or GPU power budget. 1963 */ 1964 unsigned long i915_read_mch_val(void) 1965 { 1966 struct drm_i915_private *i915; 1967 unsigned long chipset_val = 0; 1968 unsigned long graphics_val = 0; 1969 intel_wakeref_t wakeref; 1970 1971 i915 = mchdev_get(); 1972 if (!i915) 1973 return 0; 1974 1975 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 1976 struct intel_ips *ips = &i915->gt.rps.ips; 1977 1978 spin_lock_irq(&mchdev_lock); 1979 chipset_val = __ips_chipset_val(ips); 1980 graphics_val = __ips_gfx_val(ips); 1981 spin_unlock_irq(&mchdev_lock); 1982 } 1983 1984 drm_dev_put(&i915->drm); 1985 return chipset_val + graphics_val; 1986 } 1987 EXPORT_SYMBOL_GPL(i915_read_mch_val); 1988 1989 /** 1990 * i915_gpu_raise - raise GPU frequency limit 1991 * 1992 * Raise the limit; IPS indicates we have thermal headroom. 1993 */ 1994 bool i915_gpu_raise(void) 1995 { 1996 struct drm_i915_private *i915; 1997 struct intel_rps *rps; 1998 1999 i915 = mchdev_get(); 2000 if (!i915) 2001 return false; 2002 2003 rps = &i915->gt.rps; 2004 2005 spin_lock_irq(&mchdev_lock); 2006 if (rps->max_freq_softlimit < rps->max_freq) 2007 rps->max_freq_softlimit++; 2008 spin_unlock_irq(&mchdev_lock); 2009 2010 drm_dev_put(&i915->drm); 2011 return true; 2012 } 2013 EXPORT_SYMBOL_GPL(i915_gpu_raise); 2014 2015 /** 2016 * i915_gpu_lower - lower GPU frequency limit 2017 * 2018 * IPS indicates we're close to a thermal limit, so throttle back the GPU 2019 * frequency maximum. 2020 */ 2021 bool i915_gpu_lower(void) 2022 { 2023 struct drm_i915_private *i915; 2024 struct intel_rps *rps; 2025 2026 i915 = mchdev_get(); 2027 if (!i915) 2028 return false; 2029 2030 rps = &i915->gt.rps; 2031 2032 spin_lock_irq(&mchdev_lock); 2033 if (rps->max_freq_softlimit > rps->min_freq) 2034 rps->max_freq_softlimit--; 2035 spin_unlock_irq(&mchdev_lock); 2036 2037 drm_dev_put(&i915->drm); 2038 return true; 2039 } 2040 EXPORT_SYMBOL_GPL(i915_gpu_lower); 2041 2042 /** 2043 * i915_gpu_busy - indicate GPU business to IPS 2044 * 2045 * Tell the IPS driver whether or not the GPU is busy. 2046 */ 2047 bool i915_gpu_busy(void) 2048 { 2049 struct drm_i915_private *i915; 2050 bool ret; 2051 2052 i915 = mchdev_get(); 2053 if (!i915) 2054 return false; 2055 2056 ret = i915->gt.awake; 2057 2058 drm_dev_put(&i915->drm); 2059 return ret; 2060 } 2061 EXPORT_SYMBOL_GPL(i915_gpu_busy); 2062 2063 /** 2064 * i915_gpu_turbo_disable - disable graphics turbo 2065 * 2066 * Disable graphics turbo by resetting the max frequency and setting the 2067 * current frequency to the default. 2068 */ 2069 bool i915_gpu_turbo_disable(void) 2070 { 2071 struct drm_i915_private *i915; 2072 struct intel_rps *rps; 2073 bool ret; 2074 2075 i915 = mchdev_get(); 2076 if (!i915) 2077 return false; 2078 2079 rps = &i915->gt.rps; 2080 2081 spin_lock_irq(&mchdev_lock); 2082 rps->max_freq_softlimit = rps->min_freq; 2083 ret = gen5_rps_set(&i915->gt.rps, rps->min_freq); 2084 spin_unlock_irq(&mchdev_lock); 2085 2086 drm_dev_put(&i915->drm); 2087 return ret; 2088 } 2089 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 2090 2091 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2092 #include "selftest_rps.c" 2093 #endif 2094