/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"

#include <linux/pm_runtime.h>

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
/**
 * amdgpu_hotplug_work_func - display hotplug work handler
 *
 * @work: work struct
 *
 * This is the hot plug event work handler (all asics).
 * The work gets scheduled from the irq handler if there
 * was a hot plug interrupt. It walks the connector table
 * and calls the hotplug handler for each one, then sends
 * a drm hotplug event to alert userspace.
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;

	mutex_lock(&mode_config->mutex);
	list_for_each_entry(connector, &mode_config->connector_list, head)
		amdgpu_connector_hotplug(connector);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_reset_work_func - execute gpu reset
 *
 * @work: work struct
 *
 * Execute scheduled gpu reset (cayman+).
 * This function is called when the irq handler
 * thinks we need a gpu reset.
 */
static void amdgpu_irq_reset_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  reset_work);

	if (!amdgpu_sriov_vf(adev))
		amdgpu_gpu_reset(adev);
}

/* Disable *all* interrupts */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_preinstall - drm irq preinstall callback
 *
 * @dev: drm dev pointer
 *
 * Gets the hw ready to enable irqs (all asics).
 * This function disables all interrupt sources on the GPU.
 */
void amdgpu_irq_preinstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	/* Disable *all* interrupts */
	amdgpu_irq_disable_all(adev);
	/* Clear bits */
	amdgpu_ih_process(adev);
}

/**
 * amdgpu_irq_postinstall - drm irq postinstall callback
 *
 * @dev: drm dev pointer
 *
 * Handles stuff to be done after enabling irqs (all asics).
 * Returns 0 on success.
 */
int amdgpu_irq_postinstall(struct drm_device *dev)
{
	dev->max_vblank_count = 0x00ffffff;
	return 0;
}

/**
 * amdgpu_irq_uninstall - drm irq uninstall callback
 *
 * @dev: drm dev pointer
 *
 * This function disables all interrupt sources on the GPU (all asics).
 */
void amdgpu_irq_uninstall(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_irq_disable_all(adev);
}

/**
 * amdgpu_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: pointer to the drm device passed when the handler was installed
 *
 * This is the irq handler for the amdgpu driver (all asics).
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);
	return ret;
}

/**
 * amdgpu_msi_ok - asic specific msi checks
 *
 * @adev: amdgpu device pointer
 *
 * Handles asic specific MSI checks to determine if
 * MSIs should be enabled on a particular chip (all asics).
 * Returns true if MSIs should be enabled, false if MSIs
 * should not be enabled.
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	/* force MSI on */
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

/**
 * amdgpu_irq_init - init driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
 * Returns 0 for success, error for failure.
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);
	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
	if (r)
		return r;

	/* enable msi */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int ret = pci_enable_msi(adev->pdev);
		if (!ret) {
			adev->irq.msi_enabled = true;
			dev_info(adev->dev, "amdgpu: using MSI.\n");
		}
	}

	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);

	adev->irq.installed = true;
	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
	if (r) {
		adev->irq.installed = false;
		flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
		return r;
	}

	DRM_INFO("amdgpu: irq initialized.\n");
	return 0;
}

/**
 * amdgpu_irq_fini - tear down driver interrupt info
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	drm_vblank_cleanup(adev->ddev);
	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_disable_msi(adev->pdev);
		flush_work(&adev->hotplug_work);
		cancel_work_sync(&adev->reset_work);
	}

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
			if (src->data) {
				kfree(src->data);
				kfree(src);
				adev->irq.client[i].sources[j] = NULL;
			}
		}
		kfree(adev->irq.client[i].sources);
	}
}

/**
 * amdgpu_irq_add_id - register irq source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id for this source
 * @src_id: source id for this source
 * @source: irq source
 *
 * Registers @source for the given client and source id.
 * Returns 0 on success or a negative error code on failure.
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IH_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
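
/*
 * Usage sketch (illustrative only, not part of this file): a typical IP
 * block registers its interrupt source from its sw_init callback via
 * amdgpu_irq_add_id() and later enables one of its interrupt types with
 * amdgpu_irq_get(). The identifiers my_block_irq_funcs,
 * my_block_set_irq_state, my_block_process_irq and MY_BLOCK_SRC_ID are
 * hypothetical placeholders; AMDGPU_IH_CLIENTID_LEGACY is assumed to be
 * the client id used by pre-soc15 IP blocks.
 *
 *	static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
 *		.set = my_block_set_irq_state,
 *		.process = my_block_process_irq,
 *	};
 *
 *	// in the IP block's sw_init:
 *	my_block->irq.num_types = 1;
 *	my_block->irq.funcs = &my_block_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY,
 *			      MY_BLOCK_SRC_ID, &my_block->irq);
 *	if (r)
 *		return r;
 *
 *	// in hw_init, enable interrupt type 0 for this source:
 *	r = amdgpu_irq_get(adev, &my_block->irq, 0);
 */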

/**
 * amdgpu_irq_dispatch - dispatch irq to IP blocks
 *
 * @adev: amdgpu device pointer
 * @entry: interrupt vector
 *
 * Dispatches the irq to the different IP blocks
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry)
{
	unsigned client_id = entry->client_id;
	unsigned src_id = entry->src_id;
	struct amdgpu_irq_src *src;
	int r;

	trace_amdgpu_iv(entry);

	if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
		return;
	}

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
		return;
	}

	if (adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
	} else {
		if (!adev->irq.client[client_id].sources) {
			DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
				  client_id, src_id);
			return;
		}

		src = adev->irq.client[client_id].sources[src_id];
		if (!src) {
			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
			return;
		}

		r = src->funcs->process(adev, src, entry);
		if (r)
			DRM_ERROR("error processing interrupt (%d)\n", r);
	}
}

/**
 * amdgpu_irq_update - update hw interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to update
 *
 * Updates the interrupt state for a specific src (all asics).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock; otherwise
	 * we might disable an interrupt that was just enabled. */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - re-apply interrupt states
 *
 * @adev: amdgpu device pointer
 *
 * Re-applies the current enable state of every registered interrupt
 * type after a gpu reset, so the hardware matches the software
 * bookkeeping in enabled_types.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to enable
 * @type: type of interrupt you want to enable
 *
 * Enables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to disable
 * @type: type of interrupt you want to disable
 *
 * Disables the interrupt type for a specific src (all asics).
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_enabled - test if irq is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt src you want to test
 * @type: type of interrupt you want to test
 *
 * Tests if the given interrupt source is enabled or not
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

/* gen irq */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Create an irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the irq domain
 *
 * @adev: amdgpu device pointer
 *
 * Remove the irq domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}

/**
 * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
 * Linux irq
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 * Returns the Linux irq.
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}
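
/*
 * Usage sketch (illustrative only, not part of this file): a component that
 * raises a GPU interrupt but is handled by another driver obtains a Linux
 * irq number for its IH source id and hands it to that driver, which then
 * requests the irq as usual. When an IV entry with that src_id arrives,
 * amdgpu_irq_dispatch() forwards it through the irq domain via
 * generic_handle_irq(). MY_COMPONENT_SRC_ID and my_component_handler are
 * hypothetical placeholders.
 *
 *	unsigned irq = amdgpu_irq_create_mapping(adev, MY_COMPONENT_SRC_ID);
 *
 *	// typically done in the consuming driver:
 *	r = devm_request_irq(dev, irq, my_component_handler, 0,
 *			     "my-component", my_data);
 */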