/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"

/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled in many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if connector status changes, triggers sending of hotplug
 * uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, the userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin per a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that a hotplug interrupt storm will not
 * be seen when a Display Port sink is connected. Hence, on platforms whose DP
 * callbacks are handled by i915_digport_work_func(), re-enabling of HPD is not
 * performed, because HPD is never disabled there in the first place. This is
 * specific to DP sinks handled by that routine; any other display type, such
 * as HDMI or DVI, enabled on the same port goes through
 * i915_hotplug_work_func(), where the disable/re-enable logic is handled.
 */
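/*
 * To make the userspace side of the contract above concrete, here is a
 * minimal, hedged sketch (not part of this driver) of a client receiving the
 * hotplug uevent sent by drm_kms_helper_hotplug_event(), using libudev. The
 * "drm" subsystem match and the "HOTPLUG=1" uevent property are the standard
 * DRM conventions; error handling is elided for brevity:
 *
 *	#include <libudev.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct udev *udev = udev_new();
 *		struct udev_monitor *mon =
 *			udev_monitor_new_from_netlink(udev, "udev");
 *		struct pollfd pfd;
 *
 *		udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
 *		udev_monitor_enable_receiving(mon);
 *		pfd.fd = udev_monitor_get_fd(mon);
 *		pfd.events = POLLIN;
 *
 *		for (;;) {
 *			struct udev_device *dev;
 *
 *			poll(&pfd, 1, -1);
 *			dev = udev_monitor_receive_device(mon);
 *			if (!dev)
 *				continue;
 *			if (udev_device_get_property_value(dev, "HOTPLUG"))
 *				printf("hotplug uevent: reprobe connectors\n");
 *			udev_device_unref(dev);
 *		}
 *	}
 */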
/**
 * intel_hpd_pin_default - return default pin associated with certain port.
 * @dev_priv: private driver data pointer
 * @port: the hpd port to get associated pin
 *
 * Only valid for and used by digital port encoders.
 *
 * Return pin that is associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}
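/*
 * A minimal illustration (hypothetical caller): since the mapping above is
 * linear starting from PORT_A,
 *
 *	enum hpd_pin pin = intel_hpd_pin_default(i915, PORT_C);
 *
 * yields HPD_PORT_C.
 */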

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD	50

#define HPD_STORM_DETECT_PERIOD		1000
#define HPD_STORM_REENABLE_DELAY	(2 * 60 * 1000)
#define HPD_RETRY_DELAY			1000

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @dev_priv: private driver data pointer
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems
 * also suffer from short IRQ storms and must also track these. Because short
 * IRQ storms are naturally caused by sideband interactions with DP MST
 * devices, short IRQ detection is only enabled for systems without DP MST
 * support. Systems which are new enough to support DP MST are far less likely
 * to suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(&dev_priv->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}
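/*
 * A worked example of the accounting above, assuming the default threshold
 * of 50: within one HPD_STORM_DETECT_PERIOD (1000 ms) window, each long IRQ
 * adds 10 and each short IRQ adds 1, so the 6th long pulse (count 60 > 50)
 * trips HPD_MARK_DISABLED, as does the 51st short pulse (count 51 > 50) when
 * short storm detection is enabled. Once jiffies falls outside the window,
 * the count restarts from zero.
 */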

static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(&dev_priv->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(&dev_priv->drm);
		mod_delayed_work(dev_priv->unordered_wq,
				 &dev_priv->display.hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     display.hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	enum hpd_pin pin;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	spin_lock_irq(&dev_priv->irq_lock);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(&dev_priv->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED)
			dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	connector->base.status =
		drm_helper_probe_detect(&connector->base, NULL, false);

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}
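/*
 * A hedged sketch of how an encoder opts into the DP bottom half: a digital
 * port provides an ->hpd_pulse() hook at init time (per the DOC comment this
 * is intel_dp_hpd_pulse(); the init function below is illustrative only),
 * after which intel_encoder_has_hpd_pulse() routes its HPD events to
 * i915_digport_work_func() instead of the regular hotplug work:
 *
 *	static void example_dig_port_init(struct intel_digital_port *dig_port)
 *	{
 *		dig_port->hpd_pulse = intel_dp_hpd_pulse;
 *	}
 */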
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, display.hotplug.dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_encoder *encoder;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->display.hotplug.long_port_mask;
	dev_priv->display.hotplug.long_port_mask = 0;
	short_port_mask = dev_priv->display.hotplug.short_port_mask;
	dev_priv->display.hotplug.short_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum port port = encoder->port;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_port_mask & BIT(port);
		short_hpd = short_port_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(encoder->hpd_pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		queue_delayed_work(dev_priv->unordered_wq,
				   &dev_priv->display.hotplug.hotplug_work, 0);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	spin_lock_irq(&i915->irq_lock);
	i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port);
	spin_unlock_irq(&i915->irq_lock);

	queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work);
}
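/*
 * A hedged usage sketch (hypothetical caller): code that knows the sink's
 * state changed without a physical pulse, e.g. after a sideband transaction,
 * can emulate a short pulse so the normal DP bottom half picks it up:
 *
 *	intel_hpd_trigger_irq(dig_port);
 */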

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.hotplug_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;

	mutex_lock(&dev_priv->drm.mode_config.mutex);
	drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->display.hotplug.event_bits;
	dev_priv->display.hotplug.event_bits = 0;
	hpd_retry_bits = dev_priv->display.hotplug.retry_bits;
	dev_priv->display.hotplug.retry_bits = 0;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(dev_priv);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* Skip calling encoder hotplug handlers if ignore long HPD set */
	if (dev_priv->display.hotplug.ignore_long_hpd) {
		drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&dev_priv->drm.mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(&dev_priv->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(&dev_priv->drm);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->display.hotplug.retry_bits |= retry;
		spin_unlock_irq(&dev_priv->irq_lock);

		mod_delayed_work(dev_priv->unordered_wq,
				 &dev_priv->display.hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));
	}
}
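/*
 * A hedged sketch (hypothetical encoder hook, modeled on the switch above)
 * of how an ->hotplug() implementation requests the retry path: returning
 * INTEL_HOTPLUG_RETRY makes the work above re-run detection for the pin
 * after HPD_RETRY_DELAY (1 s), unless a shared pin already reported a change:
 *
 *	static enum intel_hotplug_state
 *	example_encoder_hotplug(struct intel_encoder *encoder,
 *				struct intel_connector *connector)
 *	{
 *		enum intel_hotplug_state state;
 *
 *		state = intel_encoder_hotplug(encoder, connector);
 *
 *		// Assumption for illustration: retry a bounded number of
 *		// times while the sink may still be settling.
 *		if (state == INTEL_HOTPLUG_UNCHANGED &&
 *		    connector->hotplug_retries < 5)
 *			state = INTEL_HOTPLUG_RETRY;
 *
 *		return state;
 *	}
 */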

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @dev_priv: drm_i915_private
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&dev_priv->irq_lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		enum port port = encoder->port;
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(&dev_priv->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");
		queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.long_port_mask |= BIT(port);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			dev_priv->display.hotplug.short_port_mask |= BIT(port);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			dev_priv->display.hotplug.event_bits |= BIT(pin);
			long_hpd = true;
			queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
			dev_priv->display.hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(dev_priv);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_work(dev_priv->unordered_wq,
				   &dev_priv->display.hotplug.hotplug_work, 0);
}
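/*
 * A hedged sketch of the calling convention from a platform irq handler,
 * with a hypothetical register decode (the bit names below are illustrative,
 * not real i915 register fields):
 *
 *	u32 pin_mask = 0, long_mask = 0;
 *
 *	if (hotplug_status & EXAMPLE_PORT_B_HOTPLUG) {
 *		pin_mask |= BIT(HPD_PORT_B);
 *		if (hotplug_status & EXAMPLE_PORT_B_LONG_PULSE)
 *			long_mask |= BIT(HPD_PORT_B);
 *	}
 *
 *	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 */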

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	int i;

	if (!HAS_DISPLAY(dev_priv))
		return;

	for_each_hpd_pin(i) {
		dev_priv->display.hotplug.stats[i].count = 0;
		dev_priv->display.hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	intel_hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     display.hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool enabled;

	mutex_lock(&dev_priv->drm.mode_config.mutex);

	enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled);

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(&dev_priv->drm);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled)
		drm_helper_hpd_irq_event(&dev_priv->drm);
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv) ||
	    !INTEL_DISPLAY_ENABLED(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	queue_work(dev_priv->unordered_wq,
		   &dev_priv->display.hotplug.poll_init_work);
}
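/*
 * A hedged sketch of the intended pairing with runtime PM (the caller names
 * below are illustrative; in i915 the equivalents live in the runtime
 * suspend/resume paths): HPD is not functional while suspended, so suspend
 * switches to polling, and resume re-arms HPD and stops polling:
 *
 *	static void example_runtime_suspend_display(struct drm_i915_private *i915)
 *	{
 *		intel_hpd_poll_enable(i915);
 *	}
 *
 *	static void example_runtime_resume_display(struct drm_i915_private *i915)
 *	{
 *		intel_hpd_init(i915);
 *		intel_hpd_poll_disable(i915);
 *	}
 */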

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @dev_priv: i915 device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual polling disabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
	queue_work(dev_priv->unordered_wq,
		   &dev_priv->display.hotplug.poll_init_work);
}

void intel_hpd_init_early(struct drm_i915_private *i915)
{
	INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915);
}

void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	dev_priv->display.hotplug.long_port_mask = 0;
	dev_priv->display.hotplug.short_port_mask = 0;
	dev_priv->display.hotplug.event_bits = 0;
	dev_priv->display.hotplug.retry_bits = 0;

	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->display.hotplug.dig_port_work);
	cancel_delayed_work_sync(&dev_priv->display.hotplug.hotplug_work);
	cancel_work_sync(&dev_priv->display.hotplug.poll_init_work);
	cancel_delayed_work_sync(&dev_priv->display.hotplug.reenable_work);
}

bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	bool ret = false;

	if (pin == HPD_NONE)
		return false;

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) {
		dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED;
		ret = true;
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	return ret;
}

void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
{
	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED;
	spin_unlock_irq(&dev_priv->irq_lock);
}
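/*
 * A hedged pairing sketch (hypothetical caller): intel_hpd_disable() reports
 * whether the pin was actually enabled, so a caller that wants to mask HPD
 * around an operation that would spuriously trigger it restores the previous
 * state only when it took it away:
 *
 *	bool was_enabled = intel_hpd_disable(i915, pin);
 *
 *	// ... perform the HPD-noisy operation ...
 *
 *	if (was_enabled)
 *		intel_hpd_enable(i915, pin);
 */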
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->display.hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->display.hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct intel_hotplug *hotplug = &dev_priv->display.hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->display.hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
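/*
 * A hedged usage sketch for the debugfs file above (the dri minor number in
 * the paths is illustrative). Writes accept a decimal threshold or the word
 * "reset"; 0 disables storm detection entirely:
 *
 *	# cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	Threshold: 50
 *	Detected: no
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# echo 0 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	# echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */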
"En" : "Dis"); 926 927 spin_lock_irq(&dev_priv->irq_lock); 928 hotplug->hpd_short_storm_enabled = new_state; 929 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 930 for_each_hpd_pin(i) 931 hotplug->stats[i].count = 0; 932 spin_unlock_irq(&dev_priv->irq_lock); 933 934 /* Re-enable hpd immediately if we were in an irq storm */ 935 flush_delayed_work(&dev_priv->display.hotplug.reenable_work); 936 937 return len; 938 } 939 940 static const struct file_operations i915_hpd_short_storm_ctl_fops = { 941 .owner = THIS_MODULE, 942 .open = i915_hpd_short_storm_ctl_open, 943 .read = seq_read, 944 .llseek = seq_lseek, 945 .release = single_release, 946 .write = i915_hpd_short_storm_ctl_write, 947 }; 948 949 void intel_hpd_debugfs_register(struct drm_i915_private *i915) 950 { 951 struct drm_minor *minor = i915->drm.primary; 952 953 debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, 954 i915, &i915_hpd_storm_ctl_fops); 955 debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, 956 i915, &i915_hpd_short_storm_ctl_fops); 957 debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 958 &i915->display.hotplug.ignore_long_hpd); 959 } 960