/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"

#include <linux/pm_runtime.h>

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/**
 * amdgpu_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt.  It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	/* the mode_config mutex protects the connector list against
	 * concurrent modification while we walk it */
	mutex_lock(&mode_config->mutex);
	list_for_each_entry(connector, &mode_config->connector_list, head)
		amdgpu_connector_hotplug(connector);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	amdgpu_gpu_reset(adev);
}

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Zeroes every source's per-type enable refcount and programs each
 * source's hw state to AMDGPU_IRQ_STATE_DISABLE, under the irq lock.
 */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			/* sources with no ->set callback or no types cannot
			 * be programmed and are skipped */
			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
/**
 * amdgpu_irq_preinstall - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* Clear bits */
	amdgpu_ih_process(adev);
}

/**
 * amdgpu_irq_postinstall - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles stuff to be done after enabling irqs (all asics).
 * Returns 0 on success.
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	/* tell the drm core the range of the hw vblank counter */
	dev->max_vblank_count = 0x00ffffff;
	return 0;
}

/**
 * amdgpu_irq_uninstall - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * This function disables all interrupt sources on the GPU (all asics).
 */
void amdgpu_irq_uninstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL) {
		return;
	}
	amdgpu_irq_disable_all(adev);
}

/**
 * amdgpu_irq_handler - irq handler
 *
 * @irq: interrupt number (unused here; the IH ring is processed instead)
 * @arg: the drm_device the interrupt was registered for
 *
 * This is the irq handler for the amdgpu driver (all asics).
 * Returns the result of amdgpu_ih_process(); when work was handled,
 * the device is also marked busy for runtime PM.
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}
197 */ 198 static bool amdgpu_msi_ok(struct amdgpu_device *adev) 199 { 200 /* force MSI on */ 201 if (amdgpu_msi == 1) 202 return true; 203 else if (amdgpu_msi == 0) 204 return false; 205 206 return true; 207 } 208 209 /** 210 * amdgpu_irq_init - init driver interrupt info 211 * 212 * @adev: amdgpu device pointer 213 * 214 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics). 215 * Returns 0 for success, error for failure. 216 */ 217 int amdgpu_irq_init(struct amdgpu_device *adev) 218 { 219 int r = 0; 220 221 spin_lock_init(&adev->irq.lock); 222 r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); 223 if (r) { 224 return r; 225 } 226 227 /* enable msi */ 228 adev->irq.msi_enabled = false; 229 230 if (amdgpu_msi_ok(adev)) { 231 int ret = pci_enable_msi(adev->pdev); 232 if (!ret) { 233 adev->irq.msi_enabled = true; 234 dev_info(adev->dev, "amdgpu: using MSI.\n"); 235 } 236 } 237 238 INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func); 239 INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func); 240 241 adev->irq.installed = true; 242 r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); 243 if (r) { 244 adev->irq.installed = false; 245 flush_work(&adev->hotplug_work); 246 cancel_work_sync(&adev->reset_work); 247 return r; 248 } 249 250 DRM_INFO("amdgpu: irq initialized.\n"); 251 return 0; 252 } 253 254 /** 255 * amdgpu_irq_fini - tear down driver interrupt info 256 * 257 * @adev: amdgpu device pointer 258 * 259 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics). 
/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
	}

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
			/* NOTE(review): only sources carrying a ->data payload
			 * are kfree'd; sources without ->data appear to be
			 * embedded in their IP block structures and must not be
			 * freed here — confirm against the registration sites. */
			if (src->data) {
				kfree(src->data);
				kfree(src);
				adev->irq.client[i].sources[j] = NULL;
			}
		}
		/* NOTE(review): sources table is not NULLed after kfree —
		 * assumes fini runs exactly once per device. */
		kfree(adev->irq.client[i].sources);
	}
}

/**
 * amdgpu_irq_add_id - register irq source
 *
 * @adev: amdgpu device pointer
 * @client_id: IH client id the source belongs to
 * @src_id: source id for this source
 * @source: irq source
 *
 * Returns 0 on success, -EINVAL for out-of-range ids, a source without
 * callbacks, or a duplicate registration, and -ENOMEM on allocation failure.
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IH_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	/* lazily allocate the per-client source table */
	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	/* one enable refcount per interrupt type of this source */
	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;

	return 0;
}

/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector entry decoded from the IH ring
 *
 * Dispatches the irq to the different IP blocks
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned client_id = entry->client_id;
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	trace_amdgpu_iv(entry);

	if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
		return;
	}

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	/* src_ids with an irq domain mapping are handed to the generic
	 * irq layer instead of an IP block (see amdgpu_irq_add_domain) */
	if (adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
	} else {
		if (!adev->irq.client[client_id].sources) {
			DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
				  client_id, src_id);
			return;
		}

		src = adev->irq.client[client_id].sources[src_id];
		if (!src) {
			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
			return;
		}

		r = src->funcs->process(adev, src, entry);
		if (r)
			DRM_ERROR("error processing interrupt (%d)\n", r);
	}
}
402 */ 403 int amdgpu_irq_update(struct amdgpu_device *adev, 404 struct amdgpu_irq_src *src, unsigned type) 405 { 406 unsigned long irqflags; 407 enum amdgpu_interrupt_state state; 408 int r; 409 410 spin_lock_irqsave(&adev->irq.lock, irqflags); 411 412 /* we need to determine after taking the lock, otherwise 413 we might disable just enabled interrupts again */ 414 if (amdgpu_irq_enabled(adev, src, type)) 415 state = AMDGPU_IRQ_STATE_ENABLE; 416 else 417 state = AMDGPU_IRQ_STATE_DISABLE; 418 419 r = src->funcs->set(adev, src, type, state); 420 spin_unlock_irqrestore(&adev->irq.lock, irqflags); 421 return r; 422 } 423 424 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) 425 { 426 int i, j, k; 427 428 for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { 429 if (!adev->irq.client[i].sources) 430 continue; 431 432 for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) { 433 struct amdgpu_irq_src *src = adev->irq.client[i].sources[j]; 434 435 if (!src) 436 continue; 437 for (k = 0; k < src->num_types; k++) 438 amdgpu_irq_update(adev, src, k); 439 } 440 } 441 } 442 443 /** 444 * amdgpu_irq_get - enable interrupt 445 * 446 * @adev: amdgpu device pointer 447 * @src: interrupt src you want to enable 448 * @type: type of interrupt you want to enable 449 * 450 * Enables the interrupt type for a specific src (all asics). 
451 */ 452 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, 453 unsigned type) 454 { 455 if (!adev->ddev->irq_enabled) 456 return -ENOENT; 457 458 if (type >= src->num_types) 459 return -EINVAL; 460 461 if (!src->enabled_types || !src->funcs->set) 462 return -EINVAL; 463 464 if (atomic_inc_return(&src->enabled_types[type]) == 1) 465 return amdgpu_irq_update(adev, src, type); 466 467 return 0; 468 } 469 470 /** 471 * amdgpu_irq_put - disable interrupt 472 * 473 * @adev: amdgpu device pointer 474 * @src: interrupt src you want to disable 475 * @type: type of interrupt you want to disable 476 * 477 * Disables the interrupt type for a specific src (all asics). 478 */ 479 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, 480 unsigned type) 481 { 482 if (!adev->ddev->irq_enabled) 483 return -ENOENT; 484 485 if (type >= src->num_types) 486 return -EINVAL; 487 488 if (!src->enabled_types || !src->funcs->set) 489 return -EINVAL; 490 491 if (atomic_dec_and_test(&src->enabled_types[type])) 492 return amdgpu_irq_update(adev, src, type); 493 494 return 0; 495 } 496 497 /** 498 * amdgpu_irq_enabled - test if irq is enabled or not 499 * 500 * @adev: amdgpu device pointer 501 * @idx: interrupt src you want to test 502 * 503 * Tests if the given interrupt source is enabled or not 504 */ 505 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, 506 unsigned type) 507 { 508 if (!adev->ddev->irq_enabled) 509 return false; 510 511 if (type >= src->num_types) 512 return false; 513 514 if (!src->enabled_types || !src->funcs->set) 515 return false; 516 517 return !!atomic_read(&src->enabled_types[type]); 518 } 519 520 /* gen irq */ 521 static void amdgpu_irq_mask(struct irq_data *irqd) 522 { 523 /* XXX */ 524 } 525 526 static void amdgpu_irq_unmask(struct irq_data *irqd) 527 { 528 /* XXX */ 529 } 530 531 static struct irq_chip amdgpu_irq_chip = { 532 .name = "amdgpu-ih", 533 .irq_mask = amdgpu_irq_mask, 534 
.irq_unmask = amdgpu_irq_unmask, 535 }; 536 537 static int amdgpu_irqdomain_map(struct irq_domain *d, 538 unsigned int irq, irq_hw_number_t hwirq) 539 { 540 if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID) 541 return -EPERM; 542 543 irq_set_chip_and_handler(irq, 544 &amdgpu_irq_chip, handle_simple_irq); 545 return 0; 546 } 547 548 static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = { 549 .map = amdgpu_irqdomain_map, 550 }; 551 552 /** 553 * amdgpu_irq_add_domain - create a linear irq domain 554 * 555 * @adev: amdgpu device pointer 556 * 557 * Create an irq domain for GPU interrupt sources 558 * that may be driven by another driver (e.g., ACP). 559 */ 560 int amdgpu_irq_add_domain(struct amdgpu_device *adev) 561 { 562 adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, 563 &amdgpu_hw_irqdomain_ops, adev); 564 if (!adev->irq.domain) { 565 DRM_ERROR("GPU irq add domain failed\n"); 566 return -ENODEV; 567 } 568 569 return 0; 570 } 571 572 /** 573 * amdgpu_irq_remove_domain - remove the irq domain 574 * 575 * @adev: amdgpu device pointer 576 * 577 * Remove the irq domain for GPU interrupt sources 578 * that may be driven by another driver (e.g., ACP). 579 */ 580 void amdgpu_irq_remove_domain(struct amdgpu_device *adev) 581 { 582 if (adev->irq.domain) { 583 irq_domain_remove(adev->irq.domain); 584 adev->irq.domain = NULL; 585 } 586 } 587 588 /** 589 * amdgpu_irq_create_mapping - create a mapping between a domain irq and a 590 * Linux irq 591 * 592 * @adev: amdgpu device pointer 593 * @src_id: IH source id 594 * 595 * Create a mapping between a domain irq (GPU IH src id) and a Linux irq 596 * Use this for components that generate a GPU interrupt, but are driven 597 * by a different driver (e.g., ACP). 598 * Returns the Linux irq. 599 */ 600 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id) 601 { 602 adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id); 603 604 return adev->irq.virq[src_id]; 605 } 606