/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"

#include <linux/pm_runtime.h>

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/**
 * amdgpu_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  hotplug_work);
        struct drm_device *dev = adev->ddev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;

        mutex_lock(&mode_config->mutex);
        list_for_each_entry(connector, &mode_config->connector_list, head)
                amdgpu_connector_hotplug(connector);
        mutex_unlock(&mode_config->mutex);
        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}
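
/*
 * The hotplug work above is queued from the display IP block's HPD
 * interrupt ->process callback.  Illustrative sketch only, loosely
 * modeled on the DCE hpd handlers (dce_vX_0_hpd_irq and the sense-bit
 * handling are placeholders, not code from this file):
 *
 *	static int dce_vX_0_hpd_irq(struct amdgpu_device *adev,
 *				    struct amdgpu_irq_src *source,
 *				    struct amdgpu_iv_entry *entry)
 *	{
 *		// ... check and ack the HPD sense bit for entry->src_data ...
 *		schedule_work(&adev->hotplug_work);
 *		return 0;
 *	}
 */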

/**
 * amdgpu_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);

        amdgpu_gpu_reset(adev);
}

/* Disable *all* interrupts */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
        unsigned long irqflags;
        unsigned i, j, k;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);
        for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src || !src->funcs->set || !src->num_types)
                                continue;

                        for (k = 0; k < src->num_types; ++k) {
                                atomic_set(&src->enabled_types[k], 0);
                                r = src->funcs->set(adev, src, k,
                                                    AMDGPU_IRQ_STATE_DISABLE);
                                if (r)
                                        DRM_ERROR("error disabling interrupt (%d)\n",
                                                  r);
                        }
                }
        }
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_preinstall - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        /* Disable *all* interrupts */
        amdgpu_irq_disable_all(adev);
        /* Clear bits */
        amdgpu_ih_process(adev);
}

/**
 * amdgpu_irq_postinstall - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles anything that needs to be done after enabling irqs (all asics).
 * Returns 0 on success.
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
        dev->max_vblank_count = 0x00ffffff;
        return 0;
}

/**
 * amdgpu_irq_uninstall - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * This function disables all interrupt sources on the GPU (all asics).
 */
void amdgpu_irq_uninstall(struct drm_device *dev)
{
        struct amdgpu_device *adev = dev->dev_private;

        if (adev == NULL) {
                return;
        }
        amdgpu_irq_disable_all(adev);
}

/**
 * amdgpu_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: pointer to the drm device
 *
 * This is the irq handler for the amdgpu driver (all asics).
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        struct amdgpu_device *adev = dev->dev_private;
        irqreturn_t ret;

        ret = amdgpu_ih_process(adev);
        if (ret == IRQ_HANDLED)
                pm_runtime_mark_last_busy(dev->dev);
        return ret;
}

/**
 * amdgpu_msi_ok - asic specific msi checks
 *
 * @adev: amdgpu device pointer
 *
 * Handles asic specific MSI checks to determine if
 * MSIs should be enabled on a particular chip (all asics).
 * Returns true if MSIs should be enabled, false if MSIs
 * should not be enabled.
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
        /* force MSI on */
        if (amdgpu_msi == 1)
                return true;
        else if (amdgpu_msi == 0)
                return false;

        return true;
}
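
/*
 * amdgpu_msi is the driver's "msi" module parameter (assumed here to be
 * the usual -1 = auto, 0 = disable, 1 = force-on tristate defined in
 * amdgpu_drv.c), so MSI support can be overridden at load time, e.g.:
 *
 *	modprobe amdgpu msi=0
 *
 * or amdgpu.msi=0 on the kernel command line.
 */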

/**
 * amdgpu_irq_init - init driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
        int r = 0;

        spin_lock_init(&adev->irq.lock);
        r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
        if (r) {
                return r;
        }

        /* enable msi */
        adev->irq.msi_enabled = false;

        if (amdgpu_msi_ok(adev)) {
                int ret = pci_enable_msi(adev->pdev);
                if (!ret) {
                        adev->irq.msi_enabled = true;
                        dev_info(adev->dev, "amdgpu: using MSI.\n");
                }
        }

        INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
        INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

        adev->irq.installed = true;
        r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
        if (r) {
                adev->irq.installed = false;
                flush_work(&adev->hotplug_work);
                cancel_work_sync(&adev->reset_work);
                return r;
        }

        DRM_INFO("amdgpu: irq initialized.\n");
        return 0;
}

/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        drm_vblank_cleanup(adev->ddev);
        if (adev->irq.installed) {
                drm_irq_uninstall(adev->ddev);
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_disable_msi(adev->pdev);
                flush_work(&adev->hotplug_work);
                cancel_work_sync(&adev->reset_work);
        }

        for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src)
                                continue;

                        kfree(src->enabled_types);
                        src->enabled_types = NULL;
                        if (src->data) {
                                kfree(src->data);
                                kfree(src);
                                adev->irq.client[i].sources[j] = NULL;
                        }
                }
                kfree(adev->irq.client[i].sources);
        }
}

/**
 * amdgpu_irq_add_id - register irq source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id for this source
 * @src_id: source id for this source
 * @source: irq source
 *
 * Registers an irq source for the given client id and source id.
 * Returns 0 on success or a negative error code on failure.
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source)
{
        if (client_id >= AMDGPU_IH_CLIENTID_MAX)
                return -EINVAL;

        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EINVAL;

        if (!source->funcs)
                return -EINVAL;

        if (!adev->irq.client[client_id].sources) {
                adev->irq.client[client_id].sources =
                        kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
                                sizeof(struct amdgpu_irq_src *),
                                GFP_KERNEL);
                if (!adev->irq.client[client_id].sources)
                        return -ENOMEM;
        }

        if (adev->irq.client[client_id].sources[src_id] != NULL)
                return -EINVAL;

        if (source->num_types && !source->enabled_types) {
                atomic_t *types;

                types = kcalloc(source->num_types, sizeof(atomic_t),
                                GFP_KERNEL);
                if (!types)
                        return -ENOMEM;

                source->enabled_types = types;
        }

        adev->irq.client[client_id].sources[src_id] = source;
        return 0;
}
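
/*
 * An IP block typically registers its interrupt source from its sw_init
 * callback by filling in a funcs table and calling amdgpu_irq_add_id().
 * Illustrative sketch only: gfx_set_eop_irq_state, gfx_process_eop_irq
 * and MY_CP_EOP_SRC_ID are placeholder names, and the remaining
 * identifiers are taken from elsewhere in the driver, not from this file:
 *
 *	static const struct amdgpu_irq_src_funcs gfx_eop_irq_funcs = {
 *		.set = gfx_set_eop_irq_state,	// programs the hw enable bits
 *		.process = gfx_process_eop_irq,	// handles a decoded IV entry
 *	};
 *
 *	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
 *	adev->gfx.eop_irq.funcs = &gfx_eop_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY,
 *			      MY_CP_EOP_SRC_ID, &adev->gfx.eop_irq);
 */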

/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector
 *
 * Dispatches the irq to the different IP blocks
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_iv_entry *entry)
{
        unsigned client_id = entry->client_id;
        unsigned src_id = entry->src_id;
        struct amdgpu_irq_src *src;
        int r;

        trace_amdgpu_iv(entry);

        if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
                return;
        }

        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
                DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
                return;
        }

        if (adev->irq.virq[src_id]) {
                generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
        } else {
                if (!adev->irq.client[client_id].sources) {
                        DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
                                  client_id, src_id);
                        return;
                }

                src = adev->irq.client[client_id].sources[src_id];
                if (!src) {
                        DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
                        return;
                }

                r = src->funcs->process(adev, src, entry);
                if (r)
                        DRM_ERROR("error processing interrupt (%d)\n", r);
        }
}

/**
 * amdgpu_irq_update - update hw interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to update
 * @type: type of interrupt you want to update
 *
 * Updates the interrupt state for a specific src (all asics).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
                      struct amdgpu_irq_src *src, unsigned type)
{
        unsigned long irqflags;
        enum amdgpu_interrupt_state state;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);

        /* We need to determine the state after taking the lock,
         * otherwise we might disable an interrupt that was just enabled. */
        if (amdgpu_irq_enabled(adev, src, type))
                state = AMDGPU_IRQ_STATE_ENABLE;
        else
                state = AMDGPU_IRQ_STATE_DISABLE;

        r = src->funcs->set(adev, src, type, state);
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
        return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - reprogram irq state after a gpu reset
 *
 * @adev: amdgpu device pointer
 *
 * Walks all registered irq sources and re-applies the cached enable state
 * to the hardware, since it may have been lost across the reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
        int i, j, k;

        for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src)
                                continue;
                        for (k = 0; k < src->num_types; k++)
                                amdgpu_irq_update(adev, src, k);
                }
        }
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Enables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return -ENOENT;

        if (type >= src->num_types)
                return -EINVAL;

        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_inc_return(&src->enabled_types[type]) == 1)
                return amdgpu_irq_update(adev, src, type);

        return 0;
}
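
/*
 * amdgpu_irq_get() and the matching amdgpu_irq_put() below are reference
 * counted per type via enabled_types[], so the hw interrupt stays enabled
 * as long as at least one user holds a reference.  Illustrative sketch of
 * a balanced get/put pair (crtc_id is a placeholder index):
 *
 *	// enable the pageflip interrupt for one crtc ...
 *	amdgpu_irq_get(adev, &adev->pageflip_irq, crtc_id);
 *	// ... and drop the reference once the flip has completed
 *	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 */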

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to disable
 * @type: type of interrupt you want to disable
 *
 * Disables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return -ENOENT;

        if (type >= src->num_types)
                return -EINVAL;

        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_dec_and_test(&src->enabled_types[type]))
                return amdgpu_irq_update(adev, src, type);

        return 0;
}

/**
 * amdgpu_irq_enabled - test if irq is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to test
 * @type: type of interrupt you want to test
 *
 * Tests if the given interrupt source and type are enabled or not
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return false;

        if (type >= src->num_types)
                return false;

        if (!src->enabled_types || !src->funcs->set)
                return false;

        return !!atomic_read(&src->enabled_types[type]);
}

/* gen irq */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
        /* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
        /* XXX */
}

static struct irq_chip amdgpu_irq_chip = {
        .name = "amdgpu-ih",
        .irq_mask = amdgpu_irq_mask,
        .irq_unmask = amdgpu_irq_unmask,
};

static int amdgpu_irqdomain_map(struct irq_domain *d,
                                unsigned int irq, irq_hw_number_t hwirq)
{
        if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EPERM;

        irq_set_chip_and_handler(irq,
                                 &amdgpu_irq_chip, handle_simple_irq);
        return 0;
}

static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
        .map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Create an irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
        adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
                                                 &amdgpu_hw_irqdomain_ops, adev);
        if (!adev->irq.domain) {
                DRM_ERROR("GPU irq add domain failed\n");
                return -ENODEV;
        }

        return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Remove the irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
        if (adev->irq.domain) {
                irq_domain_remove(adev->irq.domain);
                adev->irq.domain = NULL;
        }
}

/**
 * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
 * Linux irq
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 * Returns the Linux irq.
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
        adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

        return adev->irq.virq[src_id];
}
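
/*
 * The irq domain helpers above exist for blocks such as ACP whose
 * interrupts arrive through the GPU IH ring but are handled by a separate
 * driver.  Illustrative sketch only (MY_ACP_SRC_ID, i2s_irq_handler and
 * i2s_data are placeholders; in practice the virq is handed to the other
 * driver, e.g. as a platform device resource, and requested with the
 * normal irq API):
 *
 *	unsigned virq = amdgpu_irq_create_mapping(adev, MY_ACP_SRC_ID);
 *	if (!virq)
 *		return -EINVAL;
 *	r = request_irq(virq, i2s_irq_handler, 0, "acp-i2s", i2s_data);
 */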