1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2019 Intel Corporation 5 */ 6 7 #include <drm/i915_drm.h> 8 9 #include "i915_drv.h" 10 #include "intel_breadcrumbs.h" 11 #include "intel_gt.h" 12 #include "intel_gt_clock_utils.h" 13 #include "intel_gt_irq.h" 14 #include "intel_gt_pm_irq.h" 15 #include "intel_rps.h" 16 #include "intel_sideband.h" 17 #include "../../../platform/x86/intel_ips.h" 18 19 #define BUSY_MAX_EI 20u /* ms */ 20 21 /* 22 * Lock protecting IPS related data structures 23 */ 24 static DEFINE_SPINLOCK(mchdev_lock); 25 26 static struct intel_gt *rps_to_gt(struct intel_rps *rps) 27 { 28 return container_of(rps, struct intel_gt, rps); 29 } 30 31 static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) 32 { 33 return rps_to_gt(rps)->i915; 34 } 35 36 static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) 37 { 38 return rps_to_gt(rps)->uncore; 39 } 40 41 static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) 42 { 43 return mask & ~rps->pm_intrmsk_mbz; 44 } 45 46 static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 47 { 48 intel_uncore_write_fw(uncore, reg, val); 49 } 50 51 static void rps_timer(struct timer_list *t) 52 { 53 struct intel_rps *rps = from_timer(rps, t, timer); 54 struct intel_engine_cs *engine; 55 ktime_t dt, last, timestamp; 56 enum intel_engine_id id; 57 s64 max_busy[3] = {}; 58 59 timestamp = 0; 60 for_each_engine(engine, rps_to_gt(rps), id) { 61 s64 busy; 62 int i; 63 64 dt = intel_engine_get_busy_time(engine, ×tamp); 65 last = engine->stats.rps; 66 engine->stats.rps = dt; 67 68 busy = ktime_to_ns(ktime_sub(dt, last)); 69 for (i = 0; i < ARRAY_SIZE(max_busy); i++) { 70 if (busy > max_busy[i]) 71 swap(busy, max_busy[i]); 72 } 73 } 74 last = rps->pm_timestamp; 75 rps->pm_timestamp = timestamp; 76 77 if (intel_rps_is_active(rps)) { 78 s64 busy; 79 int i; 80 81 dt = ktime_sub(timestamp, last); 82 83 /* 84 * Our goal is to evaluate each engine independently, so we run 85 * at the lowest clocks required to sustain the heaviest 86 * workload. However, a task may be split into sequential 87 * dependent operations across a set of engines, such that 88 * the independent contributions do not account for high load, 89 * but overall the task is GPU bound. For example, consider 90 * video decode on vcs followed by colour post-processing 91 * on vecs, followed by general post-processing on rcs. 92 * Since multi-engines being active does imply a single 93 * continuous workload across all engines, we hedge our 94 * bets by only contributing a factor of the distributed 95 * load into our busyness calculation. 96 */ 97 busy = max_busy[0]; 98 for (i = 1; i < ARRAY_SIZE(max_busy); i++) { 99 if (!max_busy[i]) 100 break; 101 102 busy += div_u64(max_busy[i], 1 << i); 103 } 104 GT_TRACE(rps_to_gt(rps), 105 "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n", 106 busy, (int)div64_u64(100 * busy, dt), 107 max_busy[0], max_busy[1], max_busy[2], 108 rps->pm_interval); 109 110 if (100 * busy > rps->power.up_threshold * dt && 111 rps->cur_freq < rps->max_freq_softlimit) { 112 rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD; 113 rps->pm_interval = 1; 114 schedule_work(&rps->work); 115 } else if (100 * busy < rps->power.down_threshold * dt && 116 rps->cur_freq > rps->min_freq_softlimit) { 117 rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD; 118 rps->pm_interval = 1; 119 schedule_work(&rps->work); 120 } else { 121 rps->last_adj = 0; 122 } 123 124 mod_timer(&rps->timer, 125 jiffies + msecs_to_jiffies(rps->pm_interval)); 126 rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI); 127 } 128 } 129 130 static void rps_start_timer(struct intel_rps *rps) 131 { 132 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 133 rps->pm_interval = 1; 134 mod_timer(&rps->timer, jiffies + 1); 135 } 136 137 static void rps_stop_timer(struct intel_rps *rps) 138 { 139 del_timer_sync(&rps->timer); 140 rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp); 141 cancel_work_sync(&rps->work); 142 } 143 144 static u32 rps_pm_mask(struct intel_rps *rps, u8 val) 145 { 146 u32 mask = 0; 147 148 /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ 149 if (val > rps->min_freq_softlimit) 150 mask |= (GEN6_PM_RP_UP_EI_EXPIRED | 151 GEN6_PM_RP_DOWN_THRESHOLD | 152 GEN6_PM_RP_DOWN_TIMEOUT); 153 154 if (val < rps->max_freq_softlimit) 155 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 156 157 mask &= rps->pm_events; 158 159 return rps_pm_sanitize_mask(rps, ~mask); 160 } 161 162 static void rps_reset_ei(struct intel_rps *rps) 163 { 164 memset(&rps->ei, 0, sizeof(rps->ei)); 165 } 166 167 static void rps_enable_interrupts(struct intel_rps *rps) 168 { 169 struct intel_gt *gt = rps_to_gt(rps); 170 171 GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n", 172 rps->pm_events, rps_pm_mask(rps, rps->last_freq)); 173 174 rps_reset_ei(rps); 175 176 spin_lock_irq(>->irq_lock); 177 gen6_gt_pm_enable_irq(gt, rps->pm_events); 178 spin_unlock_irq(>->irq_lock); 179 180 intel_uncore_write(gt->uncore, 181 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); 182 } 183 184 static void gen6_rps_reset_interrupts(struct intel_rps *rps) 185 { 186 gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); 187 } 188 189 static void gen11_rps_reset_interrupts(struct intel_rps *rps) 190 { 191 while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) 192 ; 193 } 194 195 static void rps_reset_interrupts(struct intel_rps *rps) 196 { 197 struct intel_gt *gt = rps_to_gt(rps); 198 199 spin_lock_irq(>->irq_lock); 200 if (INTEL_GEN(gt->i915) >= 11) 201 gen11_rps_reset_interrupts(rps); 202 else 203 gen6_rps_reset_interrupts(rps); 204 205 rps->pm_iir = 0; 206 spin_unlock_irq(>->irq_lock); 207 } 208 209 static void rps_disable_interrupts(struct intel_rps *rps) 210 { 211 struct intel_gt *gt = rps_to_gt(rps); 212 213 intel_uncore_write(gt->uncore, 214 GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); 215 216 spin_lock_irq(>->irq_lock); 217 gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); 218 spin_unlock_irq(>->irq_lock); 219 220 intel_synchronize_irq(gt->i915); 221 222 /* 223 * Now that we will not be generating any more work, flush any 224 * outstanding tasks. As we are called on the RPS idle path, 225 * we will reset the GPU to minimum frequencies, so the current 226 * state of the worker can be discarded. 227 */ 228 cancel_work_sync(&rps->work); 229 230 rps_reset_interrupts(rps); 231 GT_TRACE(gt, "interrupts:off\n"); 232 } 233 234 static const struct cparams { 235 u16 i; 236 u16 t; 237 u16 m; 238 u16 c; 239 } cparams[] = { 240 { 1, 1333, 301, 28664 }, 241 { 1, 1066, 294, 24460 }, 242 { 1, 800, 294, 25192 }, 243 { 0, 1333, 276, 27605 }, 244 { 0, 1066, 276, 27605 }, 245 { 0, 800, 231, 23784 }, 246 }; 247 248 static void gen5_rps_init(struct intel_rps *rps) 249 { 250 struct drm_i915_private *i915 = rps_to_i915(rps); 251 struct intel_uncore *uncore = rps_to_uncore(rps); 252 u8 fmax, fmin, fstart; 253 u32 rgvmodectl; 254 int c_m, i; 255 256 if (i915->fsb_freq <= 3200) 257 c_m = 0; 258 else if (i915->fsb_freq <= 4800) 259 c_m = 1; 260 else 261 c_m = 2; 262 263 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 264 if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { 265 rps->ips.m = cparams[i].m; 266 rps->ips.c = cparams[i].c; 267 break; 268 } 269 } 270 271 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 272 273 /* Set up min, max, and cur for interrupt handling */ 274 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 275 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 276 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 277 MEMMODE_FSTART_SHIFT; 278 drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n", 279 fmax, fmin, fstart); 280 281 rps->min_freq = fmax; 282 rps->efficient_freq = fstart; 283 rps->max_freq = fmin; 284 } 285 286 static unsigned long 287 __ips_chipset_val(struct intel_ips *ips) 288 { 289 struct intel_uncore *uncore = 290 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 291 unsigned long now = jiffies_to_msecs(jiffies), dt; 292 unsigned long result; 293 u64 total, delta; 294 295 lockdep_assert_held(&mchdev_lock); 296 297 /* 298 * Prevent division-by-zero if we are asking too fast. 299 * Also, we don't get interesting results if we are polling 300 * faster than once in 10ms, so just return the saved value 301 * in such cases. 302 */ 303 dt = now - ips->last_time1; 304 if (dt <= 10) 305 return ips->chipset_power; 306 307 /* FIXME: handle per-counter overflow */ 308 total = intel_uncore_read(uncore, DMIEC); 309 total += intel_uncore_read(uncore, DDREC); 310 total += intel_uncore_read(uncore, CSIEC); 311 312 delta = total - ips->last_count1; 313 314 result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); 315 316 ips->last_count1 = total; 317 ips->last_time1 = now; 318 319 ips->chipset_power = result; 320 321 return result; 322 } 323 324 static unsigned long ips_mch_val(struct intel_uncore *uncore) 325 { 326 unsigned int m, x, b; 327 u32 tsfs; 328 329 tsfs = intel_uncore_read(uncore, TSFS); 330 x = intel_uncore_read8(uncore, TR1); 331 332 b = tsfs & TSFS_INTR_MASK; 333 m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; 334 335 return m * x / 127 - b; 336 } 337 338 static int _pxvid_to_vd(u8 pxvid) 339 { 340 if (pxvid == 0) 341 return 0; 342 343 if (pxvid >= 8 && pxvid < 31) 344 pxvid = 31; 345 346 return (pxvid + 2) * 125; 347 } 348 349 static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) 350 { 351 const int vd = _pxvid_to_vd(pxvid); 352 353 if (INTEL_INFO(i915)->is_mobile) 354 return max(vd - 1125, 0); 355 356 return vd; 357 } 358 359 static void __gen5_ips_update(struct intel_ips *ips) 360 { 361 struct intel_uncore *uncore = 362 rps_to_uncore(container_of(ips, struct intel_rps, ips)); 363 u64 now, delta, dt; 364 u32 count; 365 366 lockdep_assert_held(&mchdev_lock); 367 368 now = ktime_get_raw_ns(); 369 dt = now - ips->last_time2; 370 do_div(dt, NSEC_PER_MSEC); 371 372 /* Don't divide by 0 */ 373 if (dt <= 10) 374 return; 375 376 count = intel_uncore_read(uncore, GFXEC); 377 delta = count - ips->last_count2; 378 379 ips->last_count2 = count; 380 ips->last_time2 = now; 381 382 /* More magic constants... */ 383 ips->gfx_power = div_u64(delta * 1181, dt * 10); 384 } 385 386 static void gen5_rps_update(struct intel_rps *rps) 387 { 388 spin_lock_irq(&mchdev_lock); 389 __gen5_ips_update(&rps->ips); 390 spin_unlock_irq(&mchdev_lock); 391 } 392 393 static unsigned int gen5_invert_freq(struct intel_rps *rps, 394 unsigned int val) 395 { 396 /* Invert the frequency bin into an ips delay */ 397 val = rps->max_freq - val; 398 val = rps->min_freq + val; 399 400 return val; 401 } 402 403 static bool gen5_rps_set(struct intel_rps *rps, u8 val) 404 { 405 struct intel_uncore *uncore = rps_to_uncore(rps); 406 u16 rgvswctl; 407 408 lockdep_assert_held(&mchdev_lock); 409 410 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 411 if (rgvswctl & MEMCTL_CMD_STS) { 412 DRM_DEBUG("gpu busy, RCS change rejected\n"); 413 return false; /* still busy with another command */ 414 } 415 416 /* Invert the frequency bin into an ips delay */ 417 val = gen5_invert_freq(rps, val); 418 419 rgvswctl = 420 (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 421 (val << MEMCTL_FREQ_SHIFT) | 422 MEMCTL_SFCAVM; 423 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 424 intel_uncore_posting_read16(uncore, MEMSWCTL); 425 426 rgvswctl |= MEMCTL_CMD_STS; 427 intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); 428 429 return true; 430 } 431 432 static unsigned long intel_pxfreq(u32 vidfreq) 433 { 434 int div = (vidfreq & 0x3f0000) >> 16; 435 int post = (vidfreq & 0x3000) >> 12; 436 int pre = (vidfreq & 0x7); 437 438 if (!pre) 439 return 0; 440 441 return div * 133333 / (pre << post); 442 } 443 444 static unsigned int init_emon(struct intel_uncore *uncore) 445 { 446 u8 pxw[16]; 447 int i; 448 449 /* Disable to program */ 450 intel_uncore_write(uncore, ECR, 0); 451 intel_uncore_posting_read(uncore, ECR); 452 453 /* Program energy weights for various events */ 454 intel_uncore_write(uncore, SDEW, 0x15040d00); 455 intel_uncore_write(uncore, CSIEW0, 0x007f0000); 456 intel_uncore_write(uncore, CSIEW1, 0x1e220004); 457 intel_uncore_write(uncore, CSIEW2, 0x04000004); 458 459 for (i = 0; i < 5; i++) 460 intel_uncore_write(uncore, PEW(i), 0); 461 for (i = 0; i < 3; i++) 462 intel_uncore_write(uncore, DEW(i), 0); 463 464 /* Program P-state weights to account for frequency power adjustment */ 465 for (i = 0; i < 16; i++) { 466 u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); 467 unsigned int freq = intel_pxfreq(pxvidfreq); 468 unsigned int vid = 469 (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 470 unsigned int val; 471 472 val = vid * vid * freq / 1000 * 255; 473 val /= 127 * 127 * 900; 474 475 pxw[i] = val; 476 } 477 /* Render standby states get 0 weight */ 478 pxw[14] = 0; 479 pxw[15] = 0; 480 481 for (i = 0; i < 4; i++) { 482 intel_uncore_write(uncore, PXW(i), 483 pxw[i * 4 + 0] << 24 | 484 pxw[i * 4 + 1] << 16 | 485 pxw[i * 4 + 2] << 8 | 486 pxw[i * 4 + 3] << 0); 487 } 488 489 /* Adjust magic regs to magic values (more experimental results) */ 490 intel_uncore_write(uncore, OGW0, 0); 491 intel_uncore_write(uncore, OGW1, 0); 492 intel_uncore_write(uncore, EG0, 0x00007f00); 493 intel_uncore_write(uncore, EG1, 0x0000000e); 494 intel_uncore_write(uncore, EG2, 0x000e0000); 495 intel_uncore_write(uncore, EG3, 0x68000300); 496 intel_uncore_write(uncore, EG4, 0x42000000); 497 intel_uncore_write(uncore, EG5, 0x00140031); 498 intel_uncore_write(uncore, EG6, 0); 499 intel_uncore_write(uncore, EG7, 0); 500 501 for (i = 0; i < 8; i++) 502 intel_uncore_write(uncore, PXWL(i), 0); 503 504 /* Enable PMON + select events */ 505 intel_uncore_write(uncore, ECR, 0x80000019); 506 507 return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; 508 } 509 510 static bool gen5_rps_enable(struct intel_rps *rps) 511 { 512 struct drm_i915_private *i915 = rps_to_i915(rps); 513 struct intel_uncore *uncore = rps_to_uncore(rps); 514 u8 fstart, vstart; 515 u32 rgvmodectl; 516 517 spin_lock_irq(&mchdev_lock); 518 519 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 520 521 /* Enable temp reporting */ 522 intel_uncore_write16(uncore, PMMISC, 523 intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); 524 intel_uncore_write16(uncore, TSC1, 525 intel_uncore_read16(uncore, TSC1) | TSE); 526 527 /* 100ms RC evaluation intervals */ 528 intel_uncore_write(uncore, RCUPEI, 100000); 529 intel_uncore_write(uncore, RCDNEI, 100000); 530 531 /* Set max/min thresholds to 90ms and 80ms respectively */ 532 intel_uncore_write(uncore, RCBMAXAVG, 90000); 533 intel_uncore_write(uncore, RCBMINAVG, 80000); 534 535 intel_uncore_write(uncore, MEMIHYST, 1); 536 537 /* Set up min, max, and cur for interrupt handling */ 538 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 539 MEMMODE_FSTART_SHIFT; 540 541 vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & 542 PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; 543 544 intel_uncore_write(uncore, 545 MEMINTREN, 546 MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 547 548 intel_uncore_write(uncore, VIDSTART, vstart); 549 intel_uncore_posting_read(uncore, VIDSTART); 550 551 rgvmodectl |= MEMMODE_SWMODE_EN; 552 intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); 553 554 if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & 555 MEMCTL_CMD_STS) == 0, 10)) 556 drm_err(&uncore->i915->drm, 557 "stuck trying to change perf mode\n"); 558 mdelay(1); 559 560 gen5_rps_set(rps, rps->cur_freq); 561 562 rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); 563 rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); 564 rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); 565 rps->ips.last_time1 = jiffies_to_msecs(jiffies); 566 567 rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); 568 rps->ips.last_time2 = ktime_get_raw_ns(); 569 570 spin_lock(&i915->irq_lock); 571 ilk_enable_display_irq(i915, DE_PCU_EVENT); 572 spin_unlock(&i915->irq_lock); 573 574 spin_unlock_irq(&mchdev_lock); 575 576 rps->ips.corr = init_emon(uncore); 577 578 return true; 579 } 580 581 static void gen5_rps_disable(struct intel_rps *rps) 582 { 583 struct drm_i915_private *i915 = rps_to_i915(rps); 584 struct intel_uncore *uncore = rps_to_uncore(rps); 585 u16 rgvswctl; 586 587 spin_lock_irq(&mchdev_lock); 588 589 spin_lock(&i915->irq_lock); 590 ilk_disable_display_irq(i915, DE_PCU_EVENT); 591 spin_unlock(&i915->irq_lock); 592 593 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 594 595 /* Ack interrupts, disable EFC interrupt */ 596 intel_uncore_write(uncore, MEMINTREN, 597 intel_uncore_read(uncore, MEMINTREN) & 598 ~MEMINT_EVAL_CHG_EN); 599 intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 600 601 /* Go back to the starting frequency */ 602 gen5_rps_set(rps, rps->idle_freq); 603 mdelay(1); 604 rgvswctl |= MEMCTL_CMD_STS; 605 intel_uncore_write(uncore, MEMSWCTL, rgvswctl); 606 mdelay(1); 607 608 spin_unlock_irq(&mchdev_lock); 609 } 610 611 static u32 rps_limits(struct intel_rps *rps, u8 val) 612 { 613 u32 limits; 614 615 /* 616 * Only set the down limit when we've reached the lowest level to avoid 617 * getting more interrupts, otherwise leave this clear. This prevents a 618 * race in the hw when coming out of rc6: There's a tiny window where 619 * the hw runs at the minimal clock before selecting the desired 620 * frequency, if the down threshold expires in that window we will not 621 * receive a down interrupt. 622 */ 623 if (INTEL_GEN(rps_to_i915(rps)) >= 9) { 624 limits = rps->max_freq_softlimit << 23; 625 if (val <= rps->min_freq_softlimit) 626 limits |= rps->min_freq_softlimit << 14; 627 } else { 628 limits = rps->max_freq_softlimit << 24; 629 if (val <= rps->min_freq_softlimit) 630 limits |= rps->min_freq_softlimit << 16; 631 } 632 633 return limits; 634 } 635 636 static void rps_set_power(struct intel_rps *rps, int new_power) 637 { 638 struct intel_gt *gt = rps_to_gt(rps); 639 struct intel_uncore *uncore = gt->uncore; 640 u32 threshold_up = 0, threshold_down = 0; /* in % */ 641 u32 ei_up = 0, ei_down = 0; 642 643 lockdep_assert_held(&rps->power.mutex); 644 645 if (new_power == rps->power.mode) 646 return; 647 648 threshold_up = 95; 649 threshold_down = 85; 650 651 /* Note the units here are not exactly 1us, but 1280ns. */ 652 switch (new_power) { 653 case LOW_POWER: 654 ei_up = 16000; 655 ei_down = 32000; 656 break; 657 658 case BETWEEN: 659 ei_up = 13000; 660 ei_down = 32000; 661 break; 662 663 case HIGH_POWER: 664 ei_up = 10000; 665 ei_down = 32000; 666 break; 667 } 668 669 /* When byt can survive without system hang with dynamic 670 * sw freq adjustments, this restriction can be lifted. 671 */ 672 if (IS_VALLEYVIEW(gt->i915)) 673 goto skip_hw_write; 674 675 GT_TRACE(gt, 676 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n", 677 new_power, threshold_up, ei_up, threshold_down, ei_down); 678 679 set(uncore, GEN6_RP_UP_EI, 680 intel_gt_ns_to_pm_interval(gt, ei_up * 1000)); 681 set(uncore, GEN6_RP_UP_THRESHOLD, 682 intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10)); 683 684 set(uncore, GEN6_RP_DOWN_EI, 685 intel_gt_ns_to_pm_interval(gt, ei_down * 1000)); 686 set(uncore, GEN6_RP_DOWN_THRESHOLD, 687 intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10)); 688 689 set(uncore, GEN6_RP_CONTROL, 690 (INTEL_GEN(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | 691 GEN6_RP_MEDIA_HW_NORMAL_MODE | 692 GEN6_RP_MEDIA_IS_GFX | 693 GEN6_RP_ENABLE | 694 GEN6_RP_UP_BUSY_AVG | 695 GEN6_RP_DOWN_IDLE_AVG); 696 697 skip_hw_write: 698 rps->power.mode = new_power; 699 rps->power.up_threshold = threshold_up; 700 rps->power.down_threshold = threshold_down; 701 } 702 703 static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) 704 { 705 int new_power; 706 707 new_power = rps->power.mode; 708 switch (rps->power.mode) { 709 case LOW_POWER: 710 if (val > rps->efficient_freq + 1 && 711 val > rps->cur_freq) 712 new_power = BETWEEN; 713 break; 714 715 case BETWEEN: 716 if (val <= rps->efficient_freq && 717 val < rps->cur_freq) 718 new_power = LOW_POWER; 719 else if (val >= rps->rp0_freq && 720 val > rps->cur_freq) 721 new_power = HIGH_POWER; 722 break; 723 724 case HIGH_POWER: 725 if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && 726 val < rps->cur_freq) 727 new_power = BETWEEN; 728 break; 729 } 730 /* Max/min bins are special */ 731 if (val <= rps->min_freq_softlimit) 732 new_power = LOW_POWER; 733 if (val >= rps->max_freq_softlimit) 734 new_power = HIGH_POWER; 735 736 mutex_lock(&rps->power.mutex); 737 if (rps->power.interactive) 738 new_power = HIGH_POWER; 739 rps_set_power(rps, new_power); 740 mutex_unlock(&rps->power.mutex); 741 } 742 743 void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) 744 { 745 GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive)); 746 747 mutex_lock(&rps->power.mutex); 748 if (interactive) { 749 if (!rps->power.interactive++ && intel_rps_is_active(rps)) 750 rps_set_power(rps, HIGH_POWER); 751 } else { 752 GEM_BUG_ON(!rps->power.interactive); 753 rps->power.interactive--; 754 } 755 mutex_unlock(&rps->power.mutex); 756 } 757 758 static int gen6_rps_set(struct intel_rps *rps, u8 val) 759 { 760 struct intel_uncore *uncore = rps_to_uncore(rps); 761 struct drm_i915_private *i915 = rps_to_i915(rps); 762 u32 swreq; 763 764 if (INTEL_GEN(i915) >= 9) 765 swreq = GEN9_FREQUENCY(val); 766 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 767 swreq = HSW_FREQUENCY(val); 768 else 769 swreq = (GEN6_FREQUENCY(val) | 770 GEN6_OFFSET(0) | 771 GEN6_AGGRESSIVE_TURBO); 772 set(uncore, GEN6_RPNSWREQ, swreq); 773 774 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n", 775 val, intel_gpu_freq(rps, val), swreq); 776 777 return 0; 778 } 779 780 static int vlv_rps_set(struct intel_rps *rps, u8 val) 781 { 782 struct drm_i915_private *i915 = rps_to_i915(rps); 783 int err; 784 785 vlv_punit_get(i915); 786 err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); 787 vlv_punit_put(i915); 788 789 GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n", 790 val, intel_gpu_freq(rps, val)); 791 792 return err; 793 } 794 795 static int rps_set(struct intel_rps *rps, u8 val, bool update) 796 { 797 struct drm_i915_private *i915 = rps_to_i915(rps); 798 int err; 799 800 if (INTEL_GEN(i915) < 6) 801 return 0; 802 803 if (val == rps->last_freq) 804 return 0; 805 806 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 807 err = vlv_rps_set(rps, val); 808 else 809 err = gen6_rps_set(rps, val); 810 if (err) 811 return err; 812 813 if (update) 814 gen6_rps_set_thresholds(rps, val); 815 rps->last_freq = val; 816 817 return 0; 818 } 819 820 void intel_rps_unpark(struct intel_rps *rps) 821 { 822 if (!intel_rps_is_enabled(rps)) 823 return; 824 825 GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq); 826 827 /* 828 * Use the user's desired frequency as a guide, but for better 829 * performance, jump directly to RPe as our starting frequency. 830 */ 831 mutex_lock(&rps->lock); 832 833 intel_rps_set_active(rps); 834 intel_rps_set(rps, 835 clamp(rps->cur_freq, 836 rps->min_freq_softlimit, 837 rps->max_freq_softlimit)); 838 839 mutex_unlock(&rps->lock); 840 841 rps->pm_iir = 0; 842 if (intel_rps_has_interrupts(rps)) 843 rps_enable_interrupts(rps); 844 if (intel_rps_uses_timer(rps)) 845 rps_start_timer(rps); 846 847 if (IS_GEN(rps_to_i915(rps), 5)) 848 gen5_rps_update(rps); 849 } 850 851 void intel_rps_park(struct intel_rps *rps) 852 { 853 int adj; 854 855 if (!intel_rps_clear_active(rps)) 856 return; 857 858 if (intel_rps_uses_timer(rps)) 859 rps_stop_timer(rps); 860 if (intel_rps_has_interrupts(rps)) 861 rps_disable_interrupts(rps); 862 863 if (rps->last_freq <= rps->idle_freq) 864 return; 865 866 /* 867 * The punit delays the write of the frequency and voltage until it 868 * determines the GPU is awake. During normal usage we don't want to 869 * waste power changing the frequency if the GPU is sleeping (rc6). 870 * However, the GPU and driver is now idle and we do not want to delay 871 * switching to minimum voltage (reducing power whilst idle) as we do 872 * not expect to be woken in the near future and so must flush the 873 * change by waking the device. 874 * 875 * We choose to take the media powerwell (either would do to trick the 876 * punit into committing the voltage change) as that takes a lot less 877 * power than the render powerwell. 878 */ 879 intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); 880 rps_set(rps, rps->idle_freq, false); 881 intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); 882 883 /* 884 * Since we will try and restart from the previously requested 885 * frequency on unparking, treat this idle point as a downclock 886 * interrupt and reduce the frequency for resume. If we park/unpark 887 * more frequently than the rps worker can run, we will not respond 888 * to any EI and never see a change in frequency. 889 * 890 * (Note we accommodate Cherryview's limitation of only using an 891 * even bin by applying it to all.) 892 */ 893 adj = rps->last_adj; 894 if (adj < 0) 895 adj *= 2; 896 else /* CHV needs even encode values */ 897 adj = -2; 898 rps->last_adj = adj; 899 rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); 900 901 GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); 902 } 903 904 void intel_rps_boost(struct i915_request *rq) 905 { 906 struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; 907 unsigned long flags; 908 909 if (i915_request_signaled(rq) || !intel_rps_is_active(rps)) 910 return; 911 912 /* Serializes with i915_request_retire() */ 913 spin_lock_irqsave(&rq->lock, flags); 914 if (!i915_request_has_waitboost(rq) && 915 !dma_fence_is_signaled_locked(&rq->fence)) { 916 set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); 917 918 GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", 919 rq->fence.context, rq->fence.seqno); 920 921 if (!atomic_fetch_inc(&rps->num_waiters) && 922 READ_ONCE(rps->cur_freq) < rps->boost_freq) 923 schedule_work(&rps->work); 924 925 atomic_inc(&rps->boosts); 926 } 927 spin_unlock_irqrestore(&rq->lock, flags); 928 } 929 930 int intel_rps_set(struct intel_rps *rps, u8 val) 931 { 932 int err; 933 934 lockdep_assert_held(&rps->lock); 935 GEM_BUG_ON(val > rps->max_freq); 936 GEM_BUG_ON(val < rps->min_freq); 937 938 if (intel_rps_is_active(rps)) { 939 err = rps_set(rps, val, true); 940 if (err) 941 return err; 942 943 /* 944 * Make sure we continue to get interrupts 945 * until we hit the minimum or maximum frequencies. 946 */ 947 if (intel_rps_has_interrupts(rps)) { 948 struct intel_uncore *uncore = rps_to_uncore(rps); 949 950 set(uncore, 951 GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val)); 952 953 set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val)); 954 } 955 } 956 957 rps->cur_freq = val; 958 return 0; 959 } 960 961 static void gen6_rps_init(struct intel_rps *rps) 962 { 963 struct drm_i915_private *i915 = rps_to_i915(rps); 964 struct intel_uncore *uncore = rps_to_uncore(rps); 965 966 /* All of these values are in units of 50MHz */ 967 968 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 969 if (IS_GEN9_LP(i915)) { 970 u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); 971 972 rps->rp0_freq = (rp_state_cap >> 16) & 0xff; 973 rps->rp1_freq = (rp_state_cap >> 8) & 0xff; 974 rps->min_freq = (rp_state_cap >> 0) & 0xff; 975 } else { 976 u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); 977 978 rps->rp0_freq = (rp_state_cap >> 0) & 0xff; 979 rps->rp1_freq = (rp_state_cap >> 8) & 0xff; 980 rps->min_freq = (rp_state_cap >> 16) & 0xff; 981 } 982 983 /* hw_max = RP0 until we check for overclocking */ 984 rps->max_freq = rps->rp0_freq; 985 986 rps->efficient_freq = rps->rp1_freq; 987 if (IS_HASWELL(i915) || IS_BROADWELL(i915) || 988 IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { 989 u32 ddcc_status = 0; 990 991 if (sandybridge_pcode_read(i915, 992 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 993 &ddcc_status, NULL) == 0) 994 rps->efficient_freq = 995 clamp_t(u8, 996 (ddcc_status >> 8) & 0xff, 997 rps->min_freq, 998 rps->max_freq); 999 } 1000 1001 if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { 1002 /* Store the frequency values in 16.66 MHZ units, which is 1003 * the natural hardware unit for SKL 1004 */ 1005 rps->rp0_freq *= GEN9_FREQ_SCALER; 1006 rps->rp1_freq *= GEN9_FREQ_SCALER; 1007 rps->min_freq *= GEN9_FREQ_SCALER; 1008 rps->max_freq *= GEN9_FREQ_SCALER; 1009 rps->efficient_freq *= GEN9_FREQ_SCALER; 1010 } 1011 } 1012 1013 static bool rps_reset(struct intel_rps *rps) 1014 { 1015 struct drm_i915_private *i915 = rps_to_i915(rps); 1016 1017 /* force a reset */ 1018 rps->power.mode = -1; 1019 rps->last_freq = -1; 1020 1021 if (rps_set(rps, rps->min_freq, true)) { 1022 drm_err(&i915->drm, "Failed to reset RPS to initial values\n"); 1023 return false; 1024 } 1025 1026 rps->cur_freq = rps->min_freq; 1027 return true; 1028 } 1029 1030 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 1031 static bool gen9_rps_enable(struct intel_rps *rps) 1032 { 1033 struct intel_gt *gt = rps_to_gt(rps); 1034 struct intel_uncore *uncore = gt->uncore; 1035 1036 /* Program defaults and thresholds for RPS */ 1037 if (IS_GEN(gt->i915, 9)) 1038 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1039 GEN9_FREQUENCY(rps->rp1_freq)); 1040 1041 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); 1042 1043 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1044 1045 return rps_reset(rps); 1046 } 1047 1048 static bool gen8_rps_enable(struct intel_rps *rps) 1049 { 1050 struct intel_uncore *uncore = rps_to_uncore(rps); 1051 1052 intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, 1053 HSW_FREQUENCY(rps->rp1_freq)); 1054 1055 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1056 1057 rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD; 1058 1059 return rps_reset(rps); 1060 } 1061 1062 static bool gen6_rps_enable(struct intel_rps *rps) 1063 { 1064 struct intel_uncore *uncore = rps_to_uncore(rps); 1065 1066 /* Power down if completely idle for over 50ms */ 1067 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); 1068 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1069 1070 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1071 GEN6_PM_RP_DOWN_THRESHOLD | 1072 GEN6_PM_RP_DOWN_TIMEOUT); 1073 1074 return rps_reset(rps); 1075 } 1076 1077 static int chv_rps_max_freq(struct intel_rps *rps) 1078 { 1079 struct drm_i915_private *i915 = rps_to_i915(rps); 1080 struct intel_gt *gt = rps_to_gt(rps); 1081 u32 val; 1082 1083 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1084 1085 switch (gt->info.sseu.eu_total) { 1086 case 8: 1087 /* (2 * 4) config */ 1088 val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; 1089 break; 1090 case 12: 1091 /* (2 * 6) config */ 1092 val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; 1093 break; 1094 case 16: 1095 /* (2 * 8) config */ 1096 default: 1097 /* Setting (2 * 8) Min RP0 for any other combination */ 1098 val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; 1099 break; 1100 } 1101 1102 return val & FB_GFX_FREQ_FUSE_MASK; 1103 } 1104 1105 static int chv_rps_rpe_freq(struct intel_rps *rps) 1106 { 1107 struct drm_i915_private *i915 = rps_to_i915(rps); 1108 u32 val; 1109 1110 val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); 1111 val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; 1112 1113 return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 1114 } 1115 1116 static int chv_rps_guar_freq(struct intel_rps *rps) 1117 { 1118 struct drm_i915_private *i915 = rps_to_i915(rps); 1119 u32 val; 1120 1121 val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); 1122 1123 return val & FB_GFX_FREQ_FUSE_MASK; 1124 } 1125 1126 static u32 chv_rps_min_freq(struct intel_rps *rps) 1127 { 1128 struct drm_i915_private *i915 = rps_to_i915(rps); 1129 u32 val; 1130 1131 val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); 1132 val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; 1133 1134 return val & FB_GFX_FREQ_FUSE_MASK; 1135 } 1136 1137 static bool chv_rps_enable(struct intel_rps *rps) 1138 { 1139 struct intel_uncore *uncore = rps_to_uncore(rps); 1140 struct drm_i915_private *i915 = rps_to_i915(rps); 1141 u32 val; 1142 1143 /* 1: Program defaults and thresholds for RPS*/ 1144 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1145 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1146 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1147 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1148 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1149 1150 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1151 1152 /* 2: Enable RPS */ 1153 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1154 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1155 GEN6_RP_MEDIA_IS_GFX | 1156 GEN6_RP_ENABLE | 1157 GEN6_RP_UP_BUSY_AVG | 1158 GEN6_RP_DOWN_IDLE_AVG); 1159 1160 rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | 1161 GEN6_PM_RP_DOWN_THRESHOLD | 1162 GEN6_PM_RP_DOWN_TIMEOUT); 1163 1164 /* Setting Fixed Bias */ 1165 vlv_punit_get(i915); 1166 1167 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; 1168 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1169 1170 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1171 1172 vlv_punit_put(i915); 1173 1174 /* RPS code assumes GPLL is used */ 1175 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1176 "GPLL not enabled\n"); 1177 1178 drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); 1179 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1180 1181 return rps_reset(rps); 1182 } 1183 1184 static int vlv_rps_guar_freq(struct intel_rps *rps) 1185 { 1186 struct drm_i915_private *i915 = rps_to_i915(rps); 1187 u32 val, rp1; 1188 1189 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1190 1191 rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; 1192 rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 1193 1194 return rp1; 1195 } 1196 1197 static int vlv_rps_max_freq(struct intel_rps *rps) 1198 { 1199 struct drm_i915_private *i915 = rps_to_i915(rps); 1200 u32 val, rp0; 1201 1202 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); 1203 1204 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 1205 /* Clamp to max */ 1206 rp0 = min_t(u32, rp0, 0xea); 1207 1208 return rp0; 1209 } 1210 1211 static int vlv_rps_rpe_freq(struct intel_rps *rps) 1212 { 1213 struct drm_i915_private *i915 = rps_to_i915(rps); 1214 u32 val, rpe; 1215 1216 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 1217 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 1218 val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 1219 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 1220 1221 return rpe; 1222 } 1223 1224 static int vlv_rps_min_freq(struct intel_rps *rps) 1225 { 1226 struct drm_i915_private *i915 = rps_to_i915(rps); 1227 u32 val; 1228 1229 val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; 1230 /* 1231 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 1232 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 1233 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 1234 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 1235 * to make sure it matches what Punit accepts. 1236 */ 1237 return max_t(u32, val, 0xc0); 1238 } 1239 1240 static bool vlv_rps_enable(struct intel_rps *rps) 1241 { 1242 struct intel_uncore *uncore = rps_to_uncore(rps); 1243 struct drm_i915_private *i915 = rps_to_i915(rps); 1244 u32 val; 1245 1246 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); 1247 intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); 1248 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); 1249 intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); 1250 intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); 1251 1252 intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); 1253 1254 intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, 1255 GEN6_RP_MEDIA_TURBO | 1256 GEN6_RP_MEDIA_HW_NORMAL_MODE | 1257 GEN6_RP_MEDIA_IS_GFX | 1258 GEN6_RP_ENABLE | 1259 GEN6_RP_UP_BUSY_AVG | 1260 GEN6_RP_DOWN_IDLE_CONT); 1261 1262 /* WaGsvRC0ResidencyMethod:vlv */ 1263 rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; 1264 1265 vlv_punit_get(i915); 1266 1267 /* Setting Fixed Bias */ 1268 val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; 1269 vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); 1270 1271 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1272 1273 vlv_punit_put(i915); 1274 1275 /* RPS code assumes GPLL is used */ 1276 drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, 1277 "GPLL not enabled\n"); 1278 1279 drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE)); 1280 drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val); 1281 1282 return rps_reset(rps); 1283 } 1284 1285 static unsigned long __ips_gfx_val(struct intel_ips *ips) 1286 { 1287 struct intel_rps *rps = container_of(ips, typeof(*rps), ips); 1288 struct intel_uncore *uncore = rps_to_uncore(rps); 1289 unsigned int t, state1, state2; 1290 u32 pxvid, ext_v; 1291 u64 corr, corr2; 1292 1293 lockdep_assert_held(&mchdev_lock); 1294 1295 pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); 1296 pxvid = (pxvid >> 24) & 0x7f; 1297 ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); 1298 1299 state1 = ext_v; 1300 1301 /* Revel in the empirically derived constants */ 1302 1303 /* Correction factor in 1/100000 units */ 1304 t = ips_mch_val(uncore); 1305 if (t > 80) 1306 corr = t * 2349 + 135940; 1307 else if (t >= 50) 1308 corr = t * 964 + 29317; 1309 else /* < 50 */ 1310 corr = t * 301 + 1004; 1311 1312 corr = div_u64(corr * 150142 * state1, 10000) - 78642; 1313 corr2 = div_u64(corr, 100000) * ips->corr; 1314 1315 state2 = div_u64(corr2 * state1, 10000); 1316 state2 /= 100; /* convert to mW */ 1317 1318 __gen5_ips_update(ips); 1319 1320 return ips->gfx_power + state2; 1321 } 1322 1323 static bool has_busy_stats(struct intel_rps *rps) 1324 { 1325 struct intel_engine_cs *engine; 1326 enum intel_engine_id id; 1327 1328 for_each_engine(engine, rps_to_gt(rps), id) { 1329 if (!intel_engine_supports_stats(engine)) 1330 return false; 1331 } 1332 1333 return true; 1334 } 1335 1336 void intel_rps_enable(struct intel_rps *rps) 1337 { 1338 struct drm_i915_private *i915 = rps_to_i915(rps); 1339 struct intel_uncore *uncore = rps_to_uncore(rps); 1340 bool enabled = false; 1341 1342 if (!HAS_RPS(i915)) 1343 return; 1344 1345 intel_gt_check_clock_frequency(rps_to_gt(rps)); 1346 1347 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 1348 if (rps->max_freq <= rps->min_freq) 1349 /* leave disabled, no room for dynamic reclocking */; 1350 else if (IS_CHERRYVIEW(i915)) 1351 enabled = chv_rps_enable(rps); 1352 else if (IS_VALLEYVIEW(i915)) 1353 enabled = vlv_rps_enable(rps); 1354 else if (INTEL_GEN(i915) >= 9) 1355 enabled = gen9_rps_enable(rps); 1356 else if (INTEL_GEN(i915) >= 8) 1357 enabled = gen8_rps_enable(rps); 1358 else if (INTEL_GEN(i915) >= 6) 1359 enabled = gen6_rps_enable(rps); 1360 else if (IS_IRONLAKE_M(i915)) 1361 enabled = gen5_rps_enable(rps); 1362 else 1363 MISSING_CASE(INTEL_GEN(i915)); 1364 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 1365 if (!enabled) 1366 return; 1367 1368 GT_TRACE(rps_to_gt(rps), 1369 "min:%x, max:%x, freq:[%d, %d]\n", 1370 rps->min_freq, rps->max_freq, 1371 intel_gpu_freq(rps, rps->min_freq), 1372 intel_gpu_freq(rps, rps->max_freq)); 1373 1374 GEM_BUG_ON(rps->max_freq < rps->min_freq); 1375 GEM_BUG_ON(rps->idle_freq > rps->max_freq); 1376 1377 GEM_BUG_ON(rps->efficient_freq < rps->min_freq); 1378 GEM_BUG_ON(rps->efficient_freq > rps->max_freq); 1379 1380 if (has_busy_stats(rps)) 1381 intel_rps_set_timer(rps); 1382 else if (INTEL_GEN(i915) >= 6) 1383 intel_rps_set_interrupts(rps); 1384 else 1385 /* Ironlake currently uses intel_ips.ko */ {} 1386 1387 intel_rps_set_enabled(rps); 1388 } 1389 1390 static void gen6_rps_disable(struct intel_rps *rps) 1391 { 1392 set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); 1393 } 1394 1395 void intel_rps_disable(struct intel_rps *rps) 1396 { 1397 struct drm_i915_private *i915 = rps_to_i915(rps); 1398 1399 intel_rps_clear_enabled(rps); 1400 intel_rps_clear_interrupts(rps); 1401 intel_rps_clear_timer(rps); 1402 1403 if (INTEL_GEN(i915) >= 6) 1404 gen6_rps_disable(rps); 1405 else if (IS_IRONLAKE_M(i915)) 1406 gen5_rps_disable(rps); 1407 } 1408 1409 static int byt_gpu_freq(struct intel_rps *rps, int val) 1410 { 1411 /* 1412 * N = val - 0xb7 1413 * Slow = Fast = GPLL ref * N 1414 */ 1415 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); 1416 } 1417 1418 static int byt_freq_opcode(struct intel_rps *rps, int val) 1419 { 1420 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; 1421 } 1422 1423 static int chv_gpu_freq(struct intel_rps *rps, int val) 1424 { 1425 /* 1426 * N = val / 2 1427 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 1428 */ 1429 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); 1430 } 1431 1432 static int chv_freq_opcode(struct intel_rps *rps, int val) 1433 { 1434 /* CHV needs even values */ 1435 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; 1436 } 1437 1438 int intel_gpu_freq(struct intel_rps *rps, int val) 1439 { 1440 struct drm_i915_private *i915 = rps_to_i915(rps); 1441 1442 if (INTEL_GEN(i915) >= 9) 1443 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 1444 GEN9_FREQ_SCALER); 1445 else if (IS_CHERRYVIEW(i915)) 1446 return chv_gpu_freq(rps, val); 1447 else if (IS_VALLEYVIEW(i915)) 1448 return byt_gpu_freq(rps, val); 1449 else if (INTEL_GEN(i915) >= 6) 1450 return val * GT_FREQUENCY_MULTIPLIER; 1451 else 1452 return val; 1453 } 1454 1455 int intel_freq_opcode(struct intel_rps *rps, int val) 1456 { 1457 struct drm_i915_private *i915 = rps_to_i915(rps); 1458 1459 if (INTEL_GEN(i915) >= 9) 1460 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 1461 GT_FREQUENCY_MULTIPLIER); 1462 else if (IS_CHERRYVIEW(i915)) 1463 return chv_freq_opcode(rps, val); 1464 else if (IS_VALLEYVIEW(i915)) 1465 return byt_freq_opcode(rps, val); 1466 else if (INTEL_GEN(i915) >= 6) 1467 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 1468 else 1469 return val; 1470 } 1471 1472 static void vlv_init_gpll_ref_freq(struct intel_rps *rps) 1473 { 1474 struct drm_i915_private *i915 = rps_to_i915(rps); 1475 1476 rps->gpll_ref_freq = 1477 vlv_get_cck_clock(i915, "GPLL ref", 1478 CCK_GPLL_CLOCK_CONTROL, 1479 i915->czclk_freq); 1480 1481 drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n", 1482 rps->gpll_ref_freq); 1483 } 1484 1485 static void vlv_rps_init(struct intel_rps *rps) 1486 { 1487 struct drm_i915_private *i915 = rps_to_i915(rps); 1488 u32 val; 1489 1490 vlv_iosf_sb_get(i915, 1491 BIT(VLV_IOSF_SB_PUNIT) | 1492 BIT(VLV_IOSF_SB_NC) | 1493 BIT(VLV_IOSF_SB_CCK)); 1494 1495 vlv_init_gpll_ref_freq(rps); 1496 1497 val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1498 switch ((val >> 6) & 3) { 1499 case 0: 1500 case 1: 1501 i915->mem_freq = 800; 1502 break; 1503 case 2: 1504 i915->mem_freq = 1066; 1505 break; 1506 case 3: 1507 i915->mem_freq = 1333; 1508 break; 1509 } 1510 drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 1511 1512 rps->max_freq = vlv_rps_max_freq(rps); 1513 rps->rp0_freq = rps->max_freq; 1514 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1515 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1516 1517 rps->efficient_freq = vlv_rps_rpe_freq(rps); 1518 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1519 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1520 1521 rps->rp1_freq = vlv_rps_guar_freq(rps); 1522 drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 1523 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1524 1525 rps->min_freq = vlv_rps_min_freq(rps); 1526 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1527 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1528 1529 vlv_iosf_sb_put(i915, 1530 BIT(VLV_IOSF_SB_PUNIT) | 1531 BIT(VLV_IOSF_SB_NC) | 1532 BIT(VLV_IOSF_SB_CCK)); 1533 } 1534 1535 static void chv_rps_init(struct intel_rps *rps) 1536 { 1537 struct drm_i915_private *i915 = rps_to_i915(rps); 1538 u32 val; 1539 1540 vlv_iosf_sb_get(i915, 1541 BIT(VLV_IOSF_SB_PUNIT) | 1542 BIT(VLV_IOSF_SB_NC) | 1543 BIT(VLV_IOSF_SB_CCK)); 1544 1545 vlv_init_gpll_ref_freq(rps); 1546 1547 val = vlv_cck_read(i915, CCK_FUSE_REG); 1548 1549 switch ((val >> 2) & 0x7) { 1550 case 3: 1551 i915->mem_freq = 2000; 1552 break; 1553 default: 1554 i915->mem_freq = 1600; 1555 break; 1556 } 1557 drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 1558 1559 rps->max_freq = chv_rps_max_freq(rps); 1560 rps->rp0_freq = rps->max_freq; 1561 drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", 1562 intel_gpu_freq(rps, rps->max_freq), rps->max_freq); 1563 1564 rps->efficient_freq = chv_rps_rpe_freq(rps); 1565 drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n", 1566 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq); 1567 1568 rps->rp1_freq = chv_rps_guar_freq(rps); 1569 drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n", 1570 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq); 1571 1572 rps->min_freq = chv_rps_min_freq(rps); 1573 drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n", 1574 intel_gpu_freq(rps, rps->min_freq), rps->min_freq); 1575 1576 vlv_iosf_sb_put(i915, 1577 BIT(VLV_IOSF_SB_PUNIT) | 1578 BIT(VLV_IOSF_SB_NC) | 1579 BIT(VLV_IOSF_SB_CCK)); 1580 1581 drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq | 1582 rps->rp1_freq | rps->min_freq) & 1, 1583 "Odd GPU freq values\n"); 1584 } 1585 1586 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) 1587 { 1588 ei->ktime = ktime_get_raw(); 1589 ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); 1590 ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); 1591 } 1592 1593 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir) 1594 { 1595 struct intel_uncore *uncore = rps_to_uncore(rps); 1596 const struct intel_rps_ei *prev = &rps->ei; 1597 struct intel_rps_ei now; 1598 u32 events = 0; 1599 1600 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1601 return 0; 1602 1603 vlv_c0_read(uncore, &now); 1604 1605 if (prev->ktime) { 1606 u64 time, c0; 1607 u32 render, media; 1608 1609 time = ktime_us_delta(now.ktime, prev->ktime); 1610 1611 time *= rps_to_i915(rps)->czclk_freq; 1612 1613 /* Workload can be split between render + media, 1614 * e.g. SwapBuffers being blitted in X after being rendered in 1615 * mesa. To account for this we need to combine both engines 1616 * into our activity counter. 1617 */ 1618 render = now.render_c0 - prev->render_c0; 1619 media = now.media_c0 - prev->media_c0; 1620 c0 = max(render, media); 1621 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1622 1623 if (c0 > time * rps->power.up_threshold) 1624 events = GEN6_PM_RP_UP_THRESHOLD; 1625 else if (c0 < time * rps->power.down_threshold) 1626 events = GEN6_PM_RP_DOWN_THRESHOLD; 1627 } 1628 1629 rps->ei = now; 1630 return events; 1631 } 1632 1633 static void rps_work(struct work_struct *work) 1634 { 1635 struct intel_rps *rps = container_of(work, typeof(*rps), work); 1636 struct intel_gt *gt = rps_to_gt(rps); 1637 struct drm_i915_private *i915 = rps_to_i915(rps); 1638 bool client_boost = false; 1639 int new_freq, adj, min, max; 1640 u32 pm_iir = 0; 1641 1642 spin_lock_irq(>->irq_lock); 1643 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; 1644 client_boost = atomic_read(&rps->num_waiters); 1645 spin_unlock_irq(>->irq_lock); 1646 1647 /* Make sure we didn't queue anything we're not going to process. */ 1648 if (!pm_iir && !client_boost) 1649 goto out; 1650 1651 mutex_lock(&rps->lock); 1652 if (!intel_rps_is_active(rps)) { 1653 mutex_unlock(&rps->lock); 1654 return; 1655 } 1656 1657 pm_iir |= vlv_wa_c0_ei(rps, pm_iir); 1658 1659 adj = rps->last_adj; 1660 new_freq = rps->cur_freq; 1661 min = rps->min_freq_softlimit; 1662 max = rps->max_freq_softlimit; 1663 if (client_boost) 1664 max = rps->max_freq; 1665 1666 GT_TRACE(gt, 1667 "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n", 1668 pm_iir, yesno(client_boost), 1669 adj, new_freq, min, max); 1670 1671 if (client_boost && new_freq < rps->boost_freq) { 1672 new_freq = rps->boost_freq; 1673 adj = 0; 1674 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1675 if (adj > 0) 1676 adj *= 2; 1677 else /* CHV needs even encode values */ 1678 adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; 1679 1680 if (new_freq >= rps->max_freq_softlimit) 1681 adj = 0; 1682 } else if (client_boost) { 1683 adj = 0; 1684 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1685 if (rps->cur_freq > rps->efficient_freq) 1686 new_freq = rps->efficient_freq; 1687 else if (rps->cur_freq > rps->min_freq_softlimit) 1688 new_freq = rps->min_freq_softlimit; 1689 adj = 0; 1690 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1691 if (adj < 0) 1692 adj *= 2; 1693 else /* CHV needs even encode values */ 1694 adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; 1695 1696 if (new_freq <= rps->min_freq_softlimit) 1697 adj = 0; 1698 } else { /* unknown event */ 1699 adj = 0; 1700 } 1701 1702 /* 1703 * sysfs frequency limits may have snuck in while 1704 * servicing the interrupt 1705 */ 1706 new_freq += adj; 1707 new_freq = clamp_t(int, new_freq, min, max); 1708 1709 if (intel_rps_set(rps, new_freq)) { 1710 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n"); 1711 adj = 0; 1712 } 1713 rps->last_adj = adj; 1714 1715 mutex_unlock(&rps->lock); 1716 1717 out: 1718 spin_lock_irq(>->irq_lock); 1719 gen6_gt_pm_unmask_irq(gt, rps->pm_events); 1720 spin_unlock_irq(>->irq_lock); 1721 } 1722 1723 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1724 { 1725 struct intel_gt *gt = rps_to_gt(rps); 1726 const u32 events = rps->pm_events & pm_iir; 1727 1728 lockdep_assert_held(>->irq_lock); 1729 1730 if (unlikely(!events)) 1731 return; 1732 1733 GT_TRACE(gt, "irq events:%x\n", events); 1734 1735 gen6_gt_pm_mask_irq(gt, events); 1736 1737 rps->pm_iir |= events; 1738 schedule_work(&rps->work); 1739 } 1740 1741 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) 1742 { 1743 struct intel_gt *gt = rps_to_gt(rps); 1744 u32 events; 1745 1746 events = pm_iir & rps->pm_events; 1747 if (events) { 1748 spin_lock(>->irq_lock); 1749 1750 GT_TRACE(gt, "irq events:%x\n", events); 1751 1752 gen6_gt_pm_mask_irq(gt, events); 1753 rps->pm_iir |= events; 1754 1755 schedule_work(&rps->work); 1756 spin_unlock(>->irq_lock); 1757 } 1758 1759 if (INTEL_GEN(gt->i915) >= 8) 1760 return; 1761 1762 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1763 intel_engine_signal_breadcrumbs(gt->engine[VECS0]); 1764 1765 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1766 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1767 } 1768 1769 void gen5_rps_irq_handler(struct intel_rps *rps) 1770 { 1771 struct intel_uncore *uncore = rps_to_uncore(rps); 1772 u32 busy_up, busy_down, max_avg, min_avg; 1773 u8 new_freq; 1774 1775 spin_lock(&mchdev_lock); 1776 1777 intel_uncore_write16(uncore, 1778 MEMINTRSTS, 1779 intel_uncore_read(uncore, MEMINTRSTS)); 1780 1781 intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); 1782 busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); 1783 busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); 1784 max_avg = intel_uncore_read(uncore, RCBMAXAVG); 1785 min_avg = intel_uncore_read(uncore, RCBMINAVG); 1786 1787 /* Handle RCS change request from hw */ 1788 new_freq = rps->cur_freq; 1789 if (busy_up > max_avg) 1790 new_freq++; 1791 else if (busy_down < min_avg) 1792 new_freq--; 1793 new_freq = clamp(new_freq, 1794 rps->min_freq_softlimit, 1795 rps->max_freq_softlimit); 1796 1797 if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq)) 1798 rps->cur_freq = new_freq; 1799 1800 spin_unlock(&mchdev_lock); 1801 } 1802 1803 void intel_rps_init_early(struct intel_rps *rps) 1804 { 1805 mutex_init(&rps->lock); 1806 mutex_init(&rps->power.mutex); 1807 1808 INIT_WORK(&rps->work, rps_work); 1809 timer_setup(&rps->timer, rps_timer, 0); 1810 1811 atomic_set(&rps->num_waiters, 0); 1812 } 1813 1814 void intel_rps_init(struct intel_rps *rps) 1815 { 1816 struct drm_i915_private *i915 = rps_to_i915(rps); 1817 1818 if (IS_CHERRYVIEW(i915)) 1819 chv_rps_init(rps); 1820 else if (IS_VALLEYVIEW(i915)) 1821 vlv_rps_init(rps); 1822 else if (INTEL_GEN(i915) >= 6) 1823 gen6_rps_init(rps); 1824 else if (IS_IRONLAKE_M(i915)) 1825 gen5_rps_init(rps); 1826 1827 /* Derive initial user preferences/limits from the hardware limits */ 1828 rps->max_freq_softlimit = rps->max_freq; 1829 rps->min_freq_softlimit = rps->min_freq; 1830 1831 /* After setting max-softlimit, find the overclock max freq */ 1832 if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { 1833 u32 params = 0; 1834 1835 sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, 1836 ¶ms, NULL); 1837 if (params & BIT(31)) { /* OC supported */ 1838 drm_dbg(&i915->drm, 1839 "Overclocking supported, max: %dMHz, overclock: %dMHz\n", 1840 (rps->max_freq & 0xff) * 50, 1841 (params & 0xff) * 50); 1842 rps->max_freq = params & 0xff; 1843 } 1844 } 1845 1846 /* Finally allow us to boost to max by default */ 1847 rps->boost_freq = rps->max_freq; 1848 rps->idle_freq = rps->min_freq; 1849 1850 /* Start in the middle, from here we will autotune based on workload */ 1851 rps->cur_freq = rps->efficient_freq; 1852 1853 rps->pm_intrmsk_mbz = 0; 1854 1855 /* 1856 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 1857 * if GEN6_PM_UP_EI_EXPIRED is masked. 1858 * 1859 * TODO: verify if this can be reproduced on VLV,CHV. 1860 */ 1861 if (INTEL_GEN(i915) <= 7) 1862 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 1863 1864 if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) < 11) 1865 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 1866 } 1867 1868 void intel_rps_sanitize(struct intel_rps *rps) 1869 { 1870 if (INTEL_GEN(rps_to_i915(rps)) >= 6) 1871 rps_disable_interrupts(rps); 1872 } 1873 1874 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat) 1875 { 1876 struct drm_i915_private *i915 = rps_to_i915(rps); 1877 u32 cagf; 1878 1879 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 1880 cagf = (rpstat >> 8) & 0xff; 1881 else if (INTEL_GEN(i915) >= 9) 1882 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1883 else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 1884 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1885 else if (INTEL_GEN(i915) >= 6) 1886 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1887 else 1888 cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >> 1889 MEMSTAT_PSTATE_SHIFT); 1890 1891 return cagf; 1892 } 1893 1894 static u32 read_cagf(struct intel_rps *rps) 1895 { 1896 struct drm_i915_private *i915 = rps_to_i915(rps); 1897 struct intel_uncore *uncore = rps_to_uncore(rps); 1898 u32 freq; 1899 1900 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 1901 vlv_punit_get(i915); 1902 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1903 vlv_punit_put(i915); 1904 } else if (INTEL_GEN(i915) >= 6) { 1905 freq = intel_uncore_read(uncore, GEN6_RPSTAT1); 1906 } else { 1907 freq = intel_uncore_read(uncore, MEMSTAT_ILK); 1908 } 1909 1910 return intel_rps_get_cagf(rps, freq); 1911 } 1912 1913 u32 intel_rps_read_actual_frequency(struct intel_rps *rps) 1914 { 1915 struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm; 1916 intel_wakeref_t wakeref; 1917 u32 freq = 0; 1918 1919 with_intel_runtime_pm_if_in_use(rpm, wakeref) 1920 freq = intel_gpu_freq(rps, read_cagf(rps)); 1921 1922 return freq; 1923 } 1924 1925 /* External interface for intel_ips.ko */ 1926 1927 static struct drm_i915_private __rcu *ips_mchdev; 1928 1929 /** 1930 * Tells the intel_ips driver that the i915 driver is now loaded, if 1931 * IPS got loaded first. 1932 * 1933 * This awkward dance is so that neither module has to depend on the 1934 * other in order for IPS to do the appropriate communication of 1935 * GPU turbo limits to i915. 1936 */ 1937 static void 1938 ips_ping_for_i915_load(void) 1939 { 1940 void (*link)(void); 1941 1942 link = symbol_get(ips_link_to_i915_driver); 1943 if (link) { 1944 link(); 1945 symbol_put(ips_link_to_i915_driver); 1946 } 1947 } 1948 1949 void intel_rps_driver_register(struct intel_rps *rps) 1950 { 1951 struct intel_gt *gt = rps_to_gt(rps); 1952 1953 /* 1954 * We only register the i915 ips part with intel-ips once everything is 1955 * set up, to avoid intel-ips sneaking in and reading bogus values. 1956 */ 1957 if (IS_GEN(gt->i915, 5)) { 1958 GEM_BUG_ON(ips_mchdev); 1959 rcu_assign_pointer(ips_mchdev, gt->i915); 1960 ips_ping_for_i915_load(); 1961 } 1962 } 1963 1964 void intel_rps_driver_unregister(struct intel_rps *rps) 1965 { 1966 if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps)) 1967 rcu_assign_pointer(ips_mchdev, NULL); 1968 } 1969 1970 static struct drm_i915_private *mchdev_get(void) 1971 { 1972 struct drm_i915_private *i915; 1973 1974 rcu_read_lock(); 1975 i915 = rcu_dereference(ips_mchdev); 1976 if (!kref_get_unless_zero(&i915->drm.ref)) 1977 i915 = NULL; 1978 rcu_read_unlock(); 1979 1980 return i915; 1981 } 1982 1983 /** 1984 * i915_read_mch_val - return value for IPS use 1985 * 1986 * Calculate and return a value for the IPS driver to use when deciding whether 1987 * we have thermal and power headroom to increase CPU or GPU power budget. 1988 */ 1989 unsigned long i915_read_mch_val(void) 1990 { 1991 struct drm_i915_private *i915; 1992 unsigned long chipset_val = 0; 1993 unsigned long graphics_val = 0; 1994 intel_wakeref_t wakeref; 1995 1996 i915 = mchdev_get(); 1997 if (!i915) 1998 return 0; 1999 2000 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 2001 struct intel_ips *ips = &i915->gt.rps.ips; 2002 2003 spin_lock_irq(&mchdev_lock); 2004 chipset_val = __ips_chipset_val(ips); 2005 graphics_val = __ips_gfx_val(ips); 2006 spin_unlock_irq(&mchdev_lock); 2007 } 2008 2009 drm_dev_put(&i915->drm); 2010 return chipset_val + graphics_val; 2011 } 2012 EXPORT_SYMBOL_GPL(i915_read_mch_val); 2013 2014 /** 2015 * i915_gpu_raise - raise GPU frequency limit 2016 * 2017 * Raise the limit; IPS indicates we have thermal headroom. 2018 */ 2019 bool i915_gpu_raise(void) 2020 { 2021 struct drm_i915_private *i915; 2022 struct intel_rps *rps; 2023 2024 i915 = mchdev_get(); 2025 if (!i915) 2026 return false; 2027 2028 rps = &i915->gt.rps; 2029 2030 spin_lock_irq(&mchdev_lock); 2031 if (rps->max_freq_softlimit < rps->max_freq) 2032 rps->max_freq_softlimit++; 2033 spin_unlock_irq(&mchdev_lock); 2034 2035 drm_dev_put(&i915->drm); 2036 return true; 2037 } 2038 EXPORT_SYMBOL_GPL(i915_gpu_raise); 2039 2040 /** 2041 * i915_gpu_lower - lower GPU frequency limit 2042 * 2043 * IPS indicates we're close to a thermal limit, so throttle back the GPU 2044 * frequency maximum. 2045 */ 2046 bool i915_gpu_lower(void) 2047 { 2048 struct drm_i915_private *i915; 2049 struct intel_rps *rps; 2050 2051 i915 = mchdev_get(); 2052 if (!i915) 2053 return false; 2054 2055 rps = &i915->gt.rps; 2056 2057 spin_lock_irq(&mchdev_lock); 2058 if (rps->max_freq_softlimit > rps->min_freq) 2059 rps->max_freq_softlimit--; 2060 spin_unlock_irq(&mchdev_lock); 2061 2062 drm_dev_put(&i915->drm); 2063 return true; 2064 } 2065 EXPORT_SYMBOL_GPL(i915_gpu_lower); 2066 2067 /** 2068 * i915_gpu_busy - indicate GPU business to IPS 2069 * 2070 * Tell the IPS driver whether or not the GPU is busy. 2071 */ 2072 bool i915_gpu_busy(void) 2073 { 2074 struct drm_i915_private *i915; 2075 bool ret; 2076 2077 i915 = mchdev_get(); 2078 if (!i915) 2079 return false; 2080 2081 ret = i915->gt.awake; 2082 2083 drm_dev_put(&i915->drm); 2084 return ret; 2085 } 2086 EXPORT_SYMBOL_GPL(i915_gpu_busy); 2087 2088 /** 2089 * i915_gpu_turbo_disable - disable graphics turbo 2090 * 2091 * Disable graphics turbo by resetting the max frequency and setting the 2092 * current frequency to the default. 2093 */ 2094 bool i915_gpu_turbo_disable(void) 2095 { 2096 struct drm_i915_private *i915; 2097 struct intel_rps *rps; 2098 bool ret; 2099 2100 i915 = mchdev_get(); 2101 if (!i915) 2102 return false; 2103 2104 rps = &i915->gt.rps; 2105 2106 spin_lock_irq(&mchdev_lock); 2107 rps->max_freq_softlimit = rps->min_freq; 2108 ret = gen5_rps_set(&i915->gt.rps, rps->min_freq); 2109 spin_unlock_irq(&mchdev_lock); 2110 2111 drm_dev_put(&i915->drm); 2112 return ret; 2113 } 2114 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 2115 2116 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2117 #include "selftest_rps.c" 2118 #endif 2119