/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the
 * number of interrupts exceeds a certain threshold, the interrupt is disabled
 * for a while before being re-enabled. The intention is to mitigate issues
 * arising from broken hardware triggering massive amounts of interrupts and
 * grinding the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a Display Port sink is connected. Hence, for DP handled via
 * i915_digport_work_func(), HPD re-enabling is not performed; it is never
 * expected to be disabled in the first place. This is specific to DP sinks
 * handled by that routine. Any other display, such as HDMI or DVI, enabled on
 * the same port goes through i915_hotplug_work_func(), where the storm logic
 * is handled.
 */
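/*
 * Illustrative sketch (not part of the driver, hence compiled out): roughly
 * how a platform interrupt handler feeds this layer. The status bit name is
 * hypothetical; the real register decoding lives in intel_hotplug_irq.c.
 */
#if 0
static void example_platform_hpd_irq(struct drm_i915_private *i915,
				     u32 hotplug_status)
{
	u32 pin_mask = 0, long_mask = 0;

	if (hotplug_status & EXAMPLE_PORTB_HOTPLUG_BIT) {
		pin_mask |= BIT(HPD_PORT_B);
		long_mask |= BIT(HPD_PORT_B);	/* treat as a long pulse */
	}

	/* storm detection + dispatch to the bottom halves happens here */
	intel_hpd_irq_handler(i915, pin_mask, long_mask);
}
#endif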
/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * It is only valid for, and used by, digital port encoders.
 *
 * Return: pin that is associated with @port (e.g. HPD_PORT_B for PORT_B).
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD	50

#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed; the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 towards this threshold,
 * and short IRQs count as +1. If this threshold is exceeded, it's considered
 * an IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems
 * also suffer from short IRQ storms and must track those as well. Because
 * short IRQ storms are naturally caused by sideband interactions with DP MST
 * devices, short IRQ detection is only enabled for systems without DP MST
 * support. Systems which are new enough to support DP MST are far less likely
 * to suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return: true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}
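/*
 * Worked example with the defaults above: the threshold is 50, a long IRQ
 * adds 10 to the count and a short IRQ adds 1. Five long pulses within one
 * HPD_STORM_DETECT_PERIOD bring the count to exactly 50, which does not yet
 * exceed the threshold; a sixth long pulse (count 60 > 50) is flagged as a
 * storm. Likewise, with short storm detection enabled, the 51st short pulse
 * within the period trips detection. Hence the "5 long, 50 short" note on
 * HPD_STORM_DEFAULT_THRESHOLD.
 */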
static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(&dev_priv->drm);
		mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}
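/*
 * Summary of the per-pin storm state machine implemented above and in the
 * re-enable worker below (the names are the enum values used in this file):
 *
 *   HPD_ENABLED --(storm detected)--> HPD_MARK_DISABLED
 *   HPD_MARK_DISABLED --(hotplug work: switch to polling)--> HPD_DISABLED
 *   HPD_DISABLED --(reenable_work, HPD_STORM_REENABLE_DELAY)--> HPD_ENABLED
 */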
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     display.hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	connector->base.status =
		drm_helper_probe_detect(&connector->base, NULL, false);

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->display.hotplug.long_port_mask;
	dev_priv->display.hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->display.hotplug.short_port_mask;
	dev_priv->display.hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
	spin_unlock_irq(&i915->irq_lock);

	queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}
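/*
 * Illustrative use of intel_hpd_trigger_irq() (hypothetical caller, compiled
 * out): if the driver did something after which the sink's short pulse may
 * have been missed, fake one so dig_port->hpd_pulse() re-reads sink status.
 */
#if 0
static void example_fake_short_pulse(struct intel_encoder *encoder)
{
	intel_hpd_trigger_irq(enc_to_dig_port(encoder));
}
#endif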
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.hotplug_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->display.hotplug.event_bits;
	dev_priv->display.hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
	dev_priv->display.hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Skip calling encoder hotplug handlers if "ignore long HPD" is set */
	if (dev_priv->display.hotplug.ignore_long_hpd) {
		drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&dev_priv->drm.mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(&dev_priv->drm);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}
/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->display.hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->display.hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
}
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_hpd_pin(i) {
		dev_priv->display.hotplug.stats[i].count = 0;
		dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	intel_hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}
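/*
 * Sketch of the expected bring-up ordering (simplified and compiled out; the
 * real load and resume paths live elsewhere in the driver):
 */
#if 0
static void example_driver_load(struct drm_i915_private *i915)
{
	intel_hpd_init_early(i915);	/* init work items and defaults */
	/* ... interrupts enabled with intel_irq_init_hw() ... */
	intel_hpd_init(i915);		/* reset pin state, enable HPD irqs */
	/* from here on, HPD events and polling may run concurrently */
}
#endif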
static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev_priv->drm.mode_config.mutex);

	enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(&dev_priv->drm);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling.
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(&dev_priv->drm);
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv) ||
	    !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	schedule_work(&dev_priv->display.hotplug.poll_init_work);
}
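/*
 * Illustrative pairing (hypothetical runtime-PM callbacks, compiled out):
 * HPD is not functional while suspended, so fall back to polling, and switch
 * back once HPD works again.
 */
#if 0
static int example_runtime_suspend(struct drm_i915_private *i915)
{
	intel_hpd_poll_enable(i915);	/* HPD is about to go dark */
	return 0;
}

static int example_runtime_resume(struct drm_i915_private *i915)
{
	intel_hpd_init(i915);		/* re-enable HPD interrupts */
	intel_hpd_poll_disable(i915);	/* stop polling again */
	return 0;
}
#endif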
/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug disabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
	schedule_work(&dev_priv->display.hotplug.poll_init_work);
}

void intel_hpd_init_early(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->display.hotplug.long_port_mask = 0;
	dev_priv->display.hotplug.short_port_mask = 0;
	dev_priv->display.hotplug.event_bits = 0;
	dev_priv->display.hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
}
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet.
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->display.hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->display.hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
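/*
 * Example userspace interaction with i915_hpd_storm_ctl (illustrative; the
 * path assumes debugfs is mounted in the usual place and this is DRM minor 0):
 *
 *   # cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   Threshold: 50
 *   Detected: no
 *   # echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl      # lower threshold
 *   # echo 0 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl      # disable detection
 *   # echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl  # back to default
 */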
"En" : "Dis"); 920 921 spin_lock_irq(&dev_priv->irq_lock); 922 hotplug->hpd_short_storm_enabled = new_state; 923 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 924 for_each_hpd_pin(i) 925 hotplug->stats[i].count = 0; 926 spin_unlock_irq(&dev_priv->irq_lock); 927 928 /* Re-enable hpd immediately if we were in an irq storm */ 929 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); 930 931 return len; 932 } 933 934 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 935 .owner = THIS_MODULE, 936 .open = i915_hpd_short_storm_ctl_open, 937 .read = seq_read, 938 .llseek = seq_lseek, 939 .release = single_release, 940 .write = i915_hpd_short_storm_ctl_write, 941 }; 942 943 void intel_hpd_debugfs_register(struct drm_i915_private *i915) 944 { 945 struct drm_minor *minor = i915->drm.primary; 946 947 debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, 948 i915, &i915_hpd_storm_ctl_fops); 949 debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, 950 i915, &i915_hpd_short_storm_ctl_fops); 951 debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 952 &i915->display.hotplug.ignore_long_hpd); 953 } 954