// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_drrs.h"
#include "intel_panel.h"

/**
 * DOC: Display Refresh Rate Switching (DRRS)
 *
 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching dynamically between low and high refresh rates
 * based on the usage scenario. This feature is applicable to internal panels.
 *
 * Indication that the panel supports DRRS is given by the panel EDID, which
 * would list multiple refresh rates for one resolution.
 *
 * DRRS is of two types - static and seamless.
 * Static DRRS involves changing the refresh rate (RR) by doing a full modeset
 * (may appear as a blink on screen) and is used in dock-undock scenarios.
 * Seamless DRRS involves changing the RR without any visual effect to the
 * user and can be used during normal system usage. This is done by
 * programming certain registers.
 *
 * Support for static/seamless DRRS may be indicated in the VBT based on
 * inputs from the panel spec.
 *
 * DRRS saves power by switching to the low RR based on usage scenarios.
 *
 * The implementation is based on frontbuffer tracking. When there is a
 * disturbance on the screen triggered by user activity or periodic system
 * activity, DRRS is disabled (RR is changed to the high RR). When there is
 * no movement on screen, after a timeout of 1 second, a switch to the low RR
 * is made.
 *
 * For integration with the frontbuffer tracking code, intel_drrs_invalidate()
 * and intel_drrs_flush() are called (an illustrative call-flow sketch is
 * included at the end of this file).
 *
 * DRRS can be further extended to support other internal panels and also
 * the scenario of video playback wherein the RR is set based on the rate
 * requested by userspace.
 */

const char *intel_drrs_type_str(enum drrs_type drrs_type)
{
	static const char * const str[] = {
		[DRRS_TYPE_NONE] = "none",
		[DRRS_TYPE_STATIC] = "static",
		[DRRS_TYPE_SEAMLESS] = "seamless",
	};

	if (drrs_type >= ARRAY_SIZE(str))
		return "<invalid>";

	return str[drrs_type];
}

static void
intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc,
				     enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder;
	u32 bit;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		bit = TRANSCONF_REFRESH_RATE_ALT_VLV;
	else
		bit = TRANSCONF_REFRESH_RATE_ALT_ILK;

	intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder),
		     bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0);
}

static void
intel_drrs_set_refresh_rate_m_n(struct intel_crtc *crtc,
				enum drrs_refresh_rate refresh_rate)
{
	intel_cpu_transcoder_set_m1_n1(crtc, crtc->drrs.cpu_transcoder,
				       refresh_rate == DRRS_REFRESH_RATE_LOW ?
				       &crtc->drrs.m2_n2 : &crtc->drrs.m_n);
}

bool intel_drrs_is_active(struct intel_crtc *crtc)
{
	return crtc->drrs.cpu_transcoder != INVALID_TRANSCODER;
}

static void intel_drrs_set_state(struct intel_crtc *crtc,
				 enum drrs_refresh_rate refresh_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (refresh_rate == crtc->drrs.refresh_rate)
		return;

	if (intel_cpu_transcoder_has_m2_n2(dev_priv, crtc->drrs.cpu_transcoder))
		intel_drrs_set_refresh_rate_pipeconf(crtc, refresh_rate);
	else
		intel_drrs_set_refresh_rate_m_n(crtc, refresh_rate);

	crtc->drrs.refresh_rate = refresh_rate;
}

static void intel_drrs_schedule_work(struct intel_crtc *crtc)
{
	mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
}

static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	unsigned int frontbuffer_bits;

	frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc,
					 crtc_state->bigjoiner_pipes)
		frontbuffer_bits |= INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);

	return frontbuffer_bits;
}

/**
 * intel_drrs_activate - activate DRRS
 * @crtc_state: the crtc state
 *
 * Activates DRRS on the crtc.
 */
void intel_drrs_activate(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->has_drrs)
		return;

	if (!crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	crtc->drrs.cpu_transcoder = crtc_state->cpu_transcoder;
	crtc->drrs.m_n = crtc_state->dp_m_n;
	crtc->drrs.m2_n2 = crtc_state->dp_m2_n2;
	crtc->drrs.frontbuffer_bits = intel_drrs_frontbuffer_bits(crtc_state);
	crtc->drrs.busy_frontbuffer_bits = 0;

	intel_drrs_schedule_work(crtc);

	mutex_unlock(&crtc->drrs.mutex);
}

/**
 * intel_drrs_deactivate - deactivate DRRS
 * @old_crtc_state: the old crtc state
 *
 * Deactivates DRRS on the crtc.
 */
void intel_drrs_deactivate(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);

	if (!old_crtc_state->has_drrs)
		return;

	if (!old_crtc_state->hw.active)
		return;

	if (intel_crtc_is_bigjoiner_slave(old_crtc_state))
		return;

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc))
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
	crtc->drrs.frontbuffer_bits = 0;
	crtc->drrs.busy_frontbuffer_bits = 0;

	mutex_unlock(&crtc->drrs.mutex);

	cancel_delayed_work_sync(&crtc->drrs.work);
}

static void intel_drrs_downclock_work(struct work_struct *work)
{
	struct intel_crtc *crtc = container_of(work, typeof(*crtc), drrs.work.work);

	mutex_lock(&crtc->drrs.mutex);

	if (intel_drrs_is_active(crtc) && !crtc->drrs.busy_frontbuffer_bits)
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_LOW);

	mutex_unlock(&crtc->drrs.mutex);
}

static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
					  unsigned int all_frontbuffer_bits,
					  bool invalidate)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		unsigned int frontbuffer_bits;

		mutex_lock(&crtc->drrs.mutex);

		frontbuffer_bits = all_frontbuffer_bits & crtc->drrs.frontbuffer_bits;
		if (!frontbuffer_bits) {
			mutex_unlock(&crtc->drrs.mutex);
			continue;
		}

		if (invalidate)
			crtc->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
		else
			crtc->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

		/* flush/invalidate means busy screen hence upclock */
		intel_drrs_set_state(crtc, DRRS_REFRESH_RATE_HIGH);

		/*
		 * flush also means no more activity hence schedule downclock, if all
		 * other fbs are quiescent too
		 */
		if (!crtc->drrs.busy_frontbuffer_bits)
			intel_drrs_schedule_work(crtc);
		else
			cancel_delayed_work(&crtc->drrs.work);

		mutex_unlock(&crtc->drrs.mutex);
	}
}

/**
 * intel_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts,
 * hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
			   unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
}

/**
 * intel_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or a flip on a crtc has completed, so DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted, if no
 * other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_drrs_flush(struct drm_i915_private *dev_priv,
		      unsigned int frontbuffer_bits)
{
	intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
}

/**
 * intel_drrs_crtc_init - Init DRRS for CRTC
 * @crtc: crtc
 *
 * This function is called only once at driver load to initialize the basic
 * DRRS state.
 */
void intel_drrs_crtc_init(struct intel_crtc *crtc)
{
	INIT_DELAYED_WORK(&crtc->drrs.work, intel_drrs_downclock_work);
	mutex_init(&crtc->drrs.mutex);
	crtc->drrs.cpu_transcoder = INVALID_TRANSCODER;
}

static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_crtc *crtc = m->private;
	const struct intel_crtc_state *crtc_state;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	mutex_lock(&crtc->drrs.mutex);

	seq_printf(m, "DRRS enabled: %s\n",
		   str_yes_no(crtc_state->has_drrs));

	seq_printf(m, "DRRS active: %s\n",
		   str_yes_no(intel_drrs_is_active(crtc)));

	seq_printf(m, "DRRS refresh rate: %s\n",
		   crtc->drrs.refresh_rate == DRRS_REFRESH_RATE_LOW ?
		   "low" : "high");

	seq_printf(m, "DRRS busy frontbuffer bits: 0x%x\n",
		   crtc->drrs.busy_frontbuffer_bits);

	mutex_unlock(&crtc->drrs.mutex);

	drm_modeset_unlock(&crtc->base.mutex);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_status);

static int intel_drrs_debugfs_ctl_set(void *data, u64 val)
{
	struct intel_crtc *crtc = data;
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state;
	struct drm_crtc_commit *commit;
	int ret;

	ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (!crtc_state->hw.active ||
	    !crtc_state->has_drrs)
		goto out;

	commit = crtc_state->uapi.commit;
	if (commit) {
		ret = wait_for_completion_interruptible(&commit->hw_done);
		if (ret)
			goto out;
	}

	drm_dbg(&i915->drm,
		"Manually %sactivating DRRS\n", val ? "" : "de");

	if (val)
		intel_drrs_activate(crtc_state);
	else
		intel_drrs_deactivate(crtc_state);

out:
	drm_modeset_unlock(&crtc->base.mutex);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
			 NULL, intel_drrs_debugfs_ctl_set, "%llu\n");

void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
	debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
			    crtc, &intel_drrs_debugfs_status_fops);

	debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
				   crtc, &intel_drrs_debugfs_ctl_fops);
}

static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
{
	struct intel_connector *connector = m->private;

	seq_printf(m, "DRRS type: %s\n",
		   intel_drrs_type_str(intel_panel_drrs_type(connector)));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(intel_drrs_debugfs_type);

void intel_drrs_connector_debugfs_add(struct intel_connector *connector)
{
	if (intel_panel_drrs_type(connector) == DRRS_TYPE_NONE)
		return;

	debugfs_create_file("i915_drrs_type", 0444, connector->base.debugfs_entry,
			    connector, &intel_drrs_debugfs_type_fops);
}
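
/*
 * Illustrative sketch only (not compiled as part of the driver): the call
 * flow from frontbuffer tracking into DRRS described in the DOC comment at
 * the top of this file. The function example_frontbuffer_render() is a
 * hypothetical caller; the real hooks live in the frontbuffer tracking code.
 *
 *	static void example_frontbuffer_render(struct drm_i915_private *i915,
 *					       unsigned int frontbuffer_bits)
 *	{
 *		// rendering starts on these planes: force the high refresh rate
 *		intel_drrs_invalidate(i915, frontbuffer_bits);
 *
 *		// ... rendering or the flip completes ...
 *
 *		// restart idleness detection; if no frontbuffer remains busy,
 *		// the delayed work drops to the low refresh rate after ~1 s
 *		intel_drrs_flush(i915, frontbuffer_bits);
 *	}
 */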