/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drmP.h>

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic
 * handler looks up the IRQ table and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen in the
 * hooks that DM provides for &amdgpu_irq_src_funcs.process. They are all set
 * to the DM generic handler amdgpu_dm_irq_handler(), which looks up DM's IRQ
 * tables. However, in order for the base driver to recognize this hook, DM
 * still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

/*
 * The handler tables are modified from process context (registration,
 * unregistration, suspend/resume) and walked from interrupt context by
 * amdgpu_dm_irq_handler(), so they are always protected with the irqsave
 * variants of the table lock.
 */
#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct list_head *entry;
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	/* Call each DAL subcomponent which registered for interrupt
	 * notification at INTERRUPT_LOW_IRQ_CONTEXT.
	 * (The most common use is the HPD interrupt.) */
	list_for_each(entry, handler_list) {
		handler_data = list_entry(entry,
					  struct amdgpu_dm_irq_handler_data,
					  list);

		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		handler_data->handler(handler_data->handler_arg);
	}
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (handler_removed == false) {
		/* Not necessarily an error - caller may not
		 * know the context. */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
	"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (NULL == int_params || NULL == ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
		DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/
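
/*
 * Illustrative sketch only: a DM subcomponent that wants to be notified of,
 * e.g., HPD interrupts outside of interrupt context would typically use the
 * registration API below along these lines. The handler name and argument
 * here are hypothetical and not defined in this file:
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 my_hpd_handler, my_handler_arg);
 *
 * The returned pointer identifies the registration and is what must later be
 * passed to amdgpu_dm_irq_unregister_interrupt() to remove the handler again.
 */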

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in a FIFO manner, i.e. the earliest
 * registered handler is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 * source, handler function, and args
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (false == validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt. */

	DRM_DEBUG_KMS(
	"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it; otherwise, log an error.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (false == validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found. */
		DRM_ERROR(
		"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * The low context table requires special steps to initialize, since handlers
 * will be deferred to a workqueue. See &struct irq_list_head.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* The handler was removed from the table,
		 * it means it is safe to flush all the 'work'
		 * (because no code can schedule a new one). */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&lh->work);
	}
}

/**
 * amdgpu_dm_irq_suspend() - Disable DM HPD interrupt sources for suspend
 * @adev: The base driver device containing the DM device
 *
 * Disable the hardware interrupt for every HPD and HPD RX source that has a
 * registered handler, and flush any low context work still pending for those
 * sources. FLIP and VBLANK interrupts are not touched here; they are disabled
 * from manage_dm_interrupts when a CRTC is disabled.
 *
 * Return: always 0.
 */
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable HW interrupt for HPD and HPDRX only, since FLIP and VBLANK
	 * will be disabled from manage_dm_interrupts on disable CRTC.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/**
 * amdgpu_dm_irq_resume_early() - Re-enable DM HPD RX interrupt sources
 * @adev: The base driver device containing the DM device
 *
 * Re-enable the hardware interrupt for every HPD RX (short pulse) source that
 * has a registered handler. The remaining HPD sources are re-enabled later by
 * amdgpu_dm_irq_resume_late().
 *
 * Return: always 0.
 */
int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the short pulse (HPD RX) HW interrupts. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

/**
 * amdgpu_dm_irq_resume_late() - Re-enable DM HPD interrupt sources
 * @adev: The base driver device containing the DM device
 *
 * Re-enable the hardware interrupt for every HPD source that has a registered
 * handler. FLIP and VBLANK interrupts are re-enabled from
 * manage_dm_interrupts when the CRTCs are enabled again.
 *
 * Return: always 0.
 */
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable HW interrupt for HPD only, since FLIP and VBLANK
	 * will be enabled from manage_dm_interrupts on enable CRTC.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (work) {
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Callback high irq work immediately, don't send to work queue
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	struct list_head *entry;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each(
		entry,
		&adev->dm.irq_handler_list_high_tab[irq_source]) {

		handler_data = list_entry(entry,
					  struct amdgpu_dm_irq_handler_data,
					  list);

		/* Call a subcomponent which registered for immediate
		 * interrupt notification */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler() - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low_irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

/*
 * The per-CRTC DC interrupt sources for a given type (vblank, page flip) are
 * laid out consecutively per OTG instance, so the source for a CRTC is
 * selected by adding its OTG instance to the base IRQ type.
 */
static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR(
			"%s: crtc is NULL at id :%d\n",
			func,
			crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_PFLIP,
		__func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VBLANK,
		__func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
}