1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 /**
30  * DOC: Interrupt Handling
31  *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and for dispatching the matching handlers.
 * If handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
37  *
 * MSI support is enabled unless it has been disabled via the msi module
 * parameter.
40  *
41  * For GPU interrupt sources that may be driven by another driver, IRQ domain
42  * support is used (with mapping between virtual and hardware IRQs).
43  */
44 
45 #include <linux/irq.h>
46 #include <linux/pci.h>
47 
48 #include <drm/drm_crtc_helper.h>
49 #include <drm/drm_irq.h>
50 #include <drm/drm_vblank.h>
51 #include <drm/amdgpu_drm.h>
52 #include <drm/drm_drv.h>
53 #include "amdgpu.h"
54 #include "amdgpu_ih.h"
55 #include "atom.h"
56 #include "amdgpu_connectors.h"
57 #include "amdgpu_trace.h"
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_ras.h"
60 
61 #include <linux/pm_runtime.h>
62 
63 #ifdef CONFIG_DRM_AMD_DC
64 #include "amdgpu_dm_irq.h"
65 #endif
66 
67 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
68 
69 const char *soc15_ih_clientid_name[] = {
70 	"IH",
71 	"SDMA2 or ACP",
72 	"ATHUB",
73 	"BIF",
74 	"SDMA3 or DCE",
75 	"SDMA4 or ISP",
76 	"VMC1 or PCIE0",
77 	"RLC",
78 	"SDMA0",
79 	"SDMA1",
80 	"SE0SH",
81 	"SE1SH",
82 	"SE2SH",
83 	"SE3SH",
84 	"VCN1 or UVD1",
85 	"THM",
86 	"VCN or UVD",
87 	"SDMA5 or VCE0",
88 	"VMC",
89 	"SDMA6 or XDMA",
90 	"GRBM_CP",
91 	"ATS",
92 	"ROM_SMUIO",
93 	"DF",
94 	"SDMA7 or VCE1",
95 	"PWR",
96 	"reserved",
97 	"UTCL2",
98 	"EA",
99 	"UTCL2LOG",
100 	"MP0",
101 	"MP1"
102 };
103 
104 /**
105  * amdgpu_hotplug_work_func - work handler for display hotplug event
106  *
107  * @work: work struct pointer
108  *
109  * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector. After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * Hotplug event handling must be deferred from the IRQ handler to a work
 * handler because the hotplug handler has to take mutexes, which cannot be
 * locked in an IRQ handler (since mutex_lock() may sleep).
119  */
120 static void amdgpu_hotplug_work_func(struct work_struct *work)
121 {
122 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
123 						  hotplug_work);
124 	struct drm_device *dev = adev_to_drm(adev);
125 	struct drm_mode_config *mode_config = &dev->mode_config;
126 	struct drm_connector *connector;
127 	struct drm_connector_list_iter iter;
128 
129 	mutex_lock(&mode_config->mutex);
130 	drm_connector_list_iter_begin(dev, &iter);
131 	drm_for_each_connector_iter(connector, &iter)
132 		amdgpu_connector_hotplug(connector);
133 	drm_connector_list_iter_end(&iter);
134 	mutex_unlock(&mode_config->mutex);
135 	/* Just fire off a uevent and let userspace tell us what to do */
136 	drm_helper_hpd_irq_event(dev);
137 }
138 
139 /**
140  * amdgpu_irq_disable_all - disable *all* interrupts
141  *
142  * @adev: amdgpu device pointer
143  *
144  * Disable all types of interrupts from all sources.
145  */
146 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
147 {
148 	unsigned long irqflags;
149 	unsigned i, j, k;
150 	int r;
151 
152 	spin_lock_irqsave(&adev->irq.lock, irqflags);
153 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
154 		if (!adev->irq.client[i].sources)
155 			continue;
156 
157 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
158 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
159 
160 			if (!src || !src->funcs->set || !src->num_types)
161 				continue;
162 
163 			for (k = 0; k < src->num_types; ++k) {
164 				atomic_set(&src->enabled_types[k], 0);
165 				r = src->funcs->set(adev, src, k,
166 						    AMDGPU_IRQ_STATE_DISABLE);
167 				if (r)
168 					DRM_ERROR("error disabling interrupt (%d)\n",
169 						  r);
170 			}
171 		}
172 	}
173 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
174 }
175 
176 /**
177  * amdgpu_irq_handler - IRQ handler
178  *
179  * @irq: IRQ number (unused)
180  * @arg: pointer to DRM device
181  *
182  * IRQ handler for amdgpu driver (all ASICs).
183  *
184  * Returns:
185  * result of handling the IRQ, as defined by &irqreturn_t
186  */
187 irqreturn_t amdgpu_irq_handler(int irq, void *arg)
188 {
189 	struct drm_device *dev = (struct drm_device *) arg;
190 	struct amdgpu_device *adev = drm_to_adev(dev);
191 	irqreturn_t ret;
192 
193 	ret = amdgpu_ih_process(adev, &adev->irq.ih);
194 	if (ret == IRQ_HANDLED)
195 		pm_runtime_mark_last_busy(dev->dev);
196 
	/* For hardware that cannot enable the BIF ring for both the
	 * ras_controller_irq and the ras_err_event_athub_irq IH cookies,
	 * the driver has to poll a status register to check whether the
	 * interrupt has triggered, and properly ack it here if it has.
	 */
202 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
203 		if (adev->nbio.ras_funcs &&
204 		    adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
205 			adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
206 
207 		if (adev->nbio.ras_funcs &&
208 		    adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
209 			adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
210 	}
211 
212 	return ret;
213 }
214 
215 /**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
221  */
222 static void amdgpu_irq_handle_ih1(struct work_struct *work)
223 {
224 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
225 						  irq.ih1_work);
226 
227 	amdgpu_ih_process(adev, &adev->irq.ih1);
228 }
229 
230 /**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
236  */
237 static void amdgpu_irq_handle_ih2(struct work_struct *work)
238 {
239 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
240 						  irq.ih2_work);
241 
242 	amdgpu_ih_process(adev, &adev->irq.ih2);
243 }
244 
245 /**
 * amdgpu_irq_handle_ih_soft - kick off processing for the soft IH ring
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the soft IH ring.
251  */
252 static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
253 {
254 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
255 						  irq.ih_soft_work);
256 
257 	amdgpu_ih_process(adev, &adev->irq.ih_soft);
258 }
259 
260 /**
261  * amdgpu_msi_ok - check whether MSI functionality is enabled
262  *
263  * @adev: amdgpu device pointer (unused)
264  *
265  * Checks whether MSI functionality has been disabled via module parameter
266  * (all ASICs).
267  *
268  * Returns:
269  * *true* if MSIs are allowed to be enabled or *false* otherwise
270  */
271 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
272 {
273 	if (amdgpu_msi == 1)
274 		return true;
275 	else if (amdgpu_msi == 0)
276 		return false;
277 
278 	return true;
279 }
280 
281 static void amdgpu_restore_msix(struct amdgpu_device *adev)
282 {
283 	u16 ctrl;
284 
285 	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
286 	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
287 		return;
288 
	/* Toggle the MSI-X enable bit to restore the vector setup after a
	 * VF FLR.
	 */
290 	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
291 	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
292 	ctrl |= PCI_MSIX_FLAGS_ENABLE;
293 	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
294 }
295 
296 /**
297  * amdgpu_irq_init - initialize interrupt handling
298  *
299  * @adev: amdgpu device pointer
300  *
 * Sets up the work functions for the hotplug and IH processing interrupts,
 * enables MSI functionality, and initializes vblank and hotplug interrupt
 * handling.
303  *
304  * Returns:
305  * 0 on success or error code on failure
306  */
307 int amdgpu_irq_init(struct amdgpu_device *adev)
308 {
309 	int r = 0;
310 
311 	spin_lock_init(&adev->irq.lock);
312 
313 	/* Enable MSI if not disabled by module parameter */
314 	adev->irq.msi_enabled = false;
315 
316 	if (amdgpu_msi_ok(adev)) {
317 		int nvec = pci_msix_vec_count(adev->pdev);
318 		unsigned int flags;
319 
		if (nvec <= 0)
			flags = PCI_IRQ_MSI;
		else
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
325 		/* we only need one vector */
326 		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
327 		if (nvec > 0) {
328 			adev->irq.msi_enabled = true;
329 			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
330 		}
331 	}
332 
333 	if (!amdgpu_device_has_dc_support(adev)) {
334 		if (!adev->enable_virtual_display)
335 			/* Disable vblank IRQs aggressively for power-saving */
336 			/* XXX: can this be enabled for DC? */
337 			adev_to_drm(adev)->vblank_disable_immediate = true;
338 
339 		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
340 		if (r)
341 			return r;
342 
343 		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
346 	}
347 
348 	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
349 	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
350 	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
351 
352 	adev->irq.installed = true;
353 	/* Use vector 0 for MSI-X */
354 	r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
355 	if (r) {
356 		adev->irq.installed = false;
357 		if (!amdgpu_device_has_dc_support(adev))
358 			flush_work(&adev->hotplug_work);
359 		return r;
360 	}
361 	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
362 
363 	DRM_DEBUG("amdgpu: irq initialized.\n");
364 	return 0;
365 }
366 
367 
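/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down the IRQ handler and vectors installed by amdgpu_irq_init() and
 * frees all IH rings.
 */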
368 void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
369 {
370 	if (adev->irq.installed) {
		drm_irq_uninstall(adev_to_drm(adev));
372 		adev->irq.installed = false;
373 		if (adev->irq.msi_enabled)
374 			pci_free_irq_vectors(adev->pdev);
375 
376 		if (!amdgpu_device_has_dc_support(adev))
377 			flush_work(&adev->hotplug_work);
378 	}
379 
380 	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
381 	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
382 	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
383 	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
384 }
385 
386 /**
387  * amdgpu_irq_fini - shut down interrupt handling
388  *
389  * @adev: amdgpu device pointer
390  *
391  * Tears down work functions for hotplug and reset interrupts, disables MSI
392  * functionality, shuts down vblank, hotplug and reset interrupt handling,
393  * turns off interrupts from all sources (all ASICs).
394  */
395 void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
396 {
397 	unsigned i, j;
398 
399 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
400 		if (!adev->irq.client[i].sources)
401 			continue;
402 
403 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
404 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
405 
406 			if (!src)
407 				continue;
408 
409 			kfree(src->enabled_types);
410 			src->enabled_types = NULL;
411 		}
412 		kfree(adev->irq.client[i].sources);
413 		adev->irq.client[i].sources = NULL;
414 	}
415 }
416 
417 /**
418  * amdgpu_irq_add_id - register IRQ source
419  *
420  * @adev: amdgpu device pointer
421  * @client_id: client id
422  * @src_id: source id
423  * @source: IRQ source pointer
424  *
425  * Registers IRQ source on a client.
426  *
427  * Returns:
428  * 0 on success or error code otherwise
429  */
430 int amdgpu_irq_add_id(struct amdgpu_device *adev,
431 		      unsigned client_id, unsigned src_id,
432 		      struct amdgpu_irq_src *source)
433 {
434 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
435 		return -EINVAL;
436 
437 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
438 		return -EINVAL;
439 
440 	if (!source->funcs)
441 		return -EINVAL;
442 
443 	if (!adev->irq.client[client_id].sources) {
444 		adev->irq.client[client_id].sources =
445 			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
446 				sizeof(struct amdgpu_irq_src *),
447 				GFP_KERNEL);
448 		if (!adev->irq.client[client_id].sources)
449 			return -ENOMEM;
450 	}
451 
452 	if (adev->irq.client[client_id].sources[src_id] != NULL)
453 		return -EINVAL;
454 
455 	if (source->num_types && !source->enabled_types) {
456 		atomic_t *types;
457 
458 		types = kcalloc(source->num_types, sizeof(atomic_t),
459 				GFP_KERNEL);
460 		if (!types)
461 			return -ENOMEM;
462 
463 		source->enabled_types = types;
464 	}
465 
466 	adev->irq.client[client_id].sources[src_id] = source;
467 	return 0;
468 }
469 
470 /**
471  * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
472  *
473  * @adev: amdgpu device pointer
474  * @ih: interrupt ring instance
475  *
476  * Dispatches IRQ to IP blocks.
477  */
478 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
479 			 struct amdgpu_ih_ring *ih)
480 {
481 	u32 ring_index = ih->rptr >> 2;
482 	struct amdgpu_iv_entry entry;
483 	unsigned client_id, src_id;
484 	struct amdgpu_irq_src *src;
485 	bool handled = false;
486 	int r;
487 
488 	entry.ih = ih;
489 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
490 	amdgpu_ih_decode_iv(adev, &entry);
491 
492 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
493 
494 	client_id = entry.client_id;
495 	src_id = entry.src_id;
496 
497 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
498 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
499 
	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
501 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
502 
503 	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
504 		   adev->irq.virq[src_id]) {
505 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
506 
507 	} else if (!adev->irq.client[client_id].sources) {
508 		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
509 			  client_id, src_id);
510 
511 	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
512 		r = src->funcs->process(adev, src, &entry);
513 		if (r < 0)
514 			DRM_ERROR("error processing interrupt (%d)\n", r);
515 		else if (r)
516 			handled = true;
517 
518 	} else {
519 		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
520 	}
521 
522 	/* Send it to amdkfd as well if it isn't already handled */
523 	if (!handled)
524 		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
525 }
526 
527 /**
528  * amdgpu_irq_delegate - delegate IV to soft IH ring
529  *
530  * @adev: amdgpu device pointer
531  * @entry: IV entry
 * @num_dw: size of the IV in dwords
533  *
534  * Delegate the IV to the soft IH ring and schedule processing of it. Used
535  * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
536  */
537 void amdgpu_irq_delegate(struct amdgpu_device *adev,
538 			 struct amdgpu_iv_entry *entry,
539 			 unsigned int num_dw)
540 {
541 	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
542 	schedule_work(&adev->irq.ih_soft_work);
543 }
544 
545 /**
546  * amdgpu_irq_update - update hardware interrupt state
547  *
548  * @adev: amdgpu device pointer
549  * @src: interrupt source pointer
550  * @type: type of interrupt
551  *
 * Updates the interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
556 {
557 	unsigned long irqflags;
558 	enum amdgpu_interrupt_state state;
559 	int r;
560 
561 	spin_lock_irqsave(&adev->irq.lock, irqflags);
562 
	/* We need to determine the state after taking the lock, otherwise we
	 * might disable an interrupt that was just enabled again.
	 */
565 	if (amdgpu_irq_enabled(adev, src, type))
566 		state = AMDGPU_IRQ_STATE_ENABLE;
567 	else
568 		state = AMDGPU_IRQ_STATE_DISABLE;
569 
570 	r = src->funcs->set(adev, src, type, state);
571 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
572 	return r;
573 }
574 
575 /**
576  * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
577  *
578  * @adev: amdgpu device pointer
579  *
580  * Updates state of all types of interrupts on all sources on resume after
581  * reset.
582  */
583 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
584 {
585 	int i, j, k;
586 
587 	if (amdgpu_sriov_vf(adev))
588 		amdgpu_restore_msix(adev);
589 
590 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
591 		if (!adev->irq.client[i].sources)
592 			continue;
593 
594 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
595 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
596 
597 			if (!src || !src->funcs || !src->funcs->set)
598 				continue;
599 			for (k = 0; k < src->num_types; k++)
600 				amdgpu_irq_update(adev, src, k);
601 		}
602 	}
603 }
604 
605 /**
606  * amdgpu_irq_get - enable interrupt
607  *
608  * @adev: amdgpu device pointer
609  * @src: interrupt source pointer
610  * @type: type of interrupt
611  *
 * Enables the specified type of interrupt on the specified source (all ASICs).
613  *
614  * Returns:
615  * 0 on success or error code otherwise
616  */
617 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
618 		   unsigned type)
619 {
620 	if (!adev_to_drm(adev)->irq_enabled)
621 		return -ENOENT;
622 
623 	if (type >= src->num_types)
624 		return -EINVAL;
625 
626 	if (!src->enabled_types || !src->funcs->set)
627 		return -EINVAL;
628 
629 	if (atomic_inc_return(&src->enabled_types[type]) == 1)
630 		return amdgpu_irq_update(adev, src, type);
631 
632 	return 0;
633 }
634 
635 /**
636  * amdgpu_irq_put - disable interrupt
637  *
638  * @adev: amdgpu device pointer
639  * @src: interrupt source pointer
640  * @type: type of interrupt
641  *
 * Disables the specified type of interrupt on the specified source (all ASICs).
643  *
644  * Returns:
645  * 0 on success or error code otherwise
646  */
647 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
648 		   unsigned type)
649 {
650 	if (!adev_to_drm(adev)->irq_enabled)
651 		return -ENOENT;
652 
653 	if (type >= src->num_types)
654 		return -EINVAL;
655 
656 	if (!src->enabled_types || !src->funcs->set)
657 		return -EINVAL;
658 
659 	if (atomic_dec_and_test(&src->enabled_types[type]))
660 		return amdgpu_irq_update(adev, src, type);
661 
662 	return 0;
663 }
664 
665 /**
666  * amdgpu_irq_enabled - check whether interrupt is enabled or not
667  *
668  * @adev: amdgpu device pointer
669  * @src: interrupt source pointer
670  * @type: type of interrupt
671  *
672  * Checks whether the given type of interrupt is enabled on the given source.
673  *
674  * Returns:
 * *true* if the interrupt is enabled, *false* if it is disabled or if the
 * parameters are invalid
677  */
678 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
679 			unsigned type)
680 {
681 	if (!adev_to_drm(adev)->irq_enabled)
682 		return false;
683 
684 	if (type >= src->num_types)
685 		return false;
686 
687 	if (!src->enabled_types || !src->funcs->set)
688 		return false;
689 
690 	return !!atomic_read(&src->enabled_types[type]);
691 }
692 
693 /* XXX: Generic IRQ handling */
694 static void amdgpu_irq_mask(struct irq_data *irqd)
695 {
696 	/* XXX */
697 }
698 
699 static void amdgpu_irq_unmask(struct irq_data *irqd)
700 {
701 	/* XXX */
702 }
703 
704 /* amdgpu hardware interrupt chip descriptor */
705 static struct irq_chip amdgpu_irq_chip = {
706 	.name = "amdgpu-ih",
707 	.irq_mask = amdgpu_irq_mask,
708 	.irq_unmask = amdgpu_irq_unmask,
709 };
710 
711 /**
712  * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
713  *
714  * @d: amdgpu IRQ domain pointer (unused)
715  * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
717  *
 * The current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
720  *
721  * Returns:
722  * 0 on success or error code otherwise
723  */
724 static int amdgpu_irqdomain_map(struct irq_domain *d,
725 				unsigned int irq, irq_hw_number_t hwirq)
726 {
727 	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
728 		return -EPERM;
729 
730 	irq_set_chip_and_handler(irq,
731 				 &amdgpu_irq_chip, handle_simple_irq);
732 	return 0;
733 }
734 
735 /* Implementation of methods for amdgpu IRQ domain */
736 static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
737 	.map = amdgpu_irqdomain_map,
738 };
739 
740 /**
741  * amdgpu_irq_add_domain - create a linear IRQ domain
742  *
743  * @adev: amdgpu device pointer
744  *
745  * Creates an IRQ domain for GPU interrupt sources
746  * that may be driven by another driver (e.g., ACP).
747  *
748  * Returns:
749  * 0 on success or error code otherwise
750  */
751 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
752 {
753 	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
754 						 &amdgpu_hw_irqdomain_ops, adev);
755 	if (!adev->irq.domain) {
756 		DRM_ERROR("GPU irq add domain failed\n");
757 		return -ENODEV;
758 	}
759 
760 	return 0;
761 }
762 
763 /**
764  * amdgpu_irq_remove_domain - remove the IRQ domain
765  *
766  * @adev: amdgpu device pointer
767  *
768  * Removes the IRQ domain for GPU interrupt sources
769  * that may be driven by another driver (e.g., ACP).
770  */
771 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
772 {
773 	if (adev->irq.domain) {
774 		irq_domain_remove(adev->irq.domain);
775 		adev->irq.domain = NULL;
776 	}
777 }
778 
779 /**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
787  * by a different driver (e.g., ACP).
788  *
789  * Returns:
790  * Linux IRQ
791  */
792 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
793 {
794 	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
795 
796 	return adev->irq.virq[src_id];
797 }
798