1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 /**
30  * DOC: Interrupt Handling
31  *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and for dispatching the matching handlers.
 * If handling an interrupt requires calling kernel functions that may sleep,
 * the processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by a module parameter then MSI
 * support will be enabled.
40  *
41  * For GPU interrupt sources that may be driven by another driver, IRQ domain
42  * support is used (with mapping between virtual and hardware IRQs).
43  */
44 
45 #include <linux/irq.h>
46 #include <linux/pci.h>
47 
48 #include <drm/drm_crtc_helper.h>
49 #include <drm/drm_irq.h>
50 #include <drm/drm_vblank.h>
51 #include <drm/amdgpu_drm.h>
52 #include <drm/drm_drv.h>
53 #include "amdgpu.h"
54 #include "amdgpu_ih.h"
55 #include "atom.h"
56 #include "amdgpu_connectors.h"
57 #include "amdgpu_trace.h"
58 #include "amdgpu_amdkfd.h"
59 #include "amdgpu_ras.h"
60 
61 #include <linux/pm_runtime.h>
62 
63 #ifdef CONFIG_DRM_AMD_DC
64 #include "amdgpu_dm_irq.h"
65 #endif
66 
67 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
68 
69 const char *soc15_ih_clientid_name[] = {
70 	"IH",
71 	"SDMA2 or ACP",
72 	"ATHUB",
73 	"BIF",
74 	"SDMA3 or DCE",
75 	"SDMA4 or ISP",
76 	"VMC1 or PCIE0",
77 	"RLC",
78 	"SDMA0",
79 	"SDMA1",
80 	"SE0SH",
81 	"SE1SH",
82 	"SE2SH",
83 	"SE3SH",
84 	"VCN1 or UVD1",
85 	"THM",
86 	"VCN or UVD",
87 	"SDMA5 or VCE0",
88 	"VMC",
89 	"SDMA6 or XDMA",
90 	"GRBM_CP",
91 	"ATS",
92 	"ROM_SMUIO",
93 	"DF",
94 	"SDMA7 or VCE1",
95 	"PWR",
96 	"reserved",
97 	"UTCL2",
98 	"EA",
99 	"UTCL2LOG",
100 	"MP0",
101 	"MP1"
102 };
103 
104 /**
105  * amdgpu_hotplug_work_func - work handler for display hotplug event
106  *
107  * @work: work struct pointer
108  *
109  * This is the hotplug event work handler (all ASICs).
110  * The work gets scheduled from the IRQ handler if there
111  * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector. After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * Deferring hotplug event handling from the IRQ handler to a work handler is
 * required because the hotplug handler has to take mutexes, which cannot be
 * locked in an IRQ handler (since &mutex_lock may sleep).
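 *
 * The pattern in miniature (illustrative)::
 *
 *	// IRQ handler: atomic context, cannot take mutexes
 *	schedule_work(&adev->hotplug_work);
 *
 *	// work handler: process context, sleeping locks are fine
 *	mutex_lock(&mode_config->mutex);
 *	amdgpu_connector_hotplug(connector);
 *	mutex_unlock(&mode_config->mutex);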
119  */
120 static void amdgpu_hotplug_work_func(struct work_struct *work)
121 {
122 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
123 						  hotplug_work);
124 	struct drm_device *dev = adev_to_drm(adev);
125 	struct drm_mode_config *mode_config = &dev->mode_config;
126 	struct drm_connector *connector;
127 	struct drm_connector_list_iter iter;
128 
129 	mutex_lock(&mode_config->mutex);
130 	drm_connector_list_iter_begin(dev, &iter);
131 	drm_for_each_connector_iter(connector, &iter)
132 		amdgpu_connector_hotplug(connector);
133 	drm_connector_list_iter_end(&iter);
134 	mutex_unlock(&mode_config->mutex);
135 	/* Just fire off a uevent and let userspace tell us what to do */
136 	drm_helper_hpd_irq_event(dev);
137 }
138 
139 /**
140  * amdgpu_irq_disable_all - disable *all* interrupts
141  *
142  * @adev: amdgpu device pointer
143  *
144  * Disable all types of interrupts from all sources.
145  */
146 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
147 {
148 	unsigned long irqflags;
149 	unsigned i, j, k;
150 	int r;
151 
152 	spin_lock_irqsave(&adev->irq.lock, irqflags);
153 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
154 		if (!adev->irq.client[i].sources)
155 			continue;
156 
157 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
158 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
159 
160 			if (!src || !src->funcs->set || !src->num_types)
161 				continue;
162 
163 			for (k = 0; k < src->num_types; ++k) {
164 				atomic_set(&src->enabled_types[k], 0);
165 				r = src->funcs->set(adev, src, k,
166 						    AMDGPU_IRQ_STATE_DISABLE);
167 				if (r)
168 					DRM_ERROR("error disabling interrupt (%d)\n",
169 						  r);
170 			}
171 		}
172 	}
173 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
174 }
175 
176 /**
177  * amdgpu_irq_handler - IRQ handler
178  *
179  * @irq: IRQ number (unused)
180  * @arg: pointer to DRM device
181  *
182  * IRQ handler for amdgpu driver (all ASICs).
183  *
184  * Returns:
185  * result of handling the IRQ, as defined by &irqreturn_t
186  */
187 irqreturn_t amdgpu_irq_handler(int irq, void *arg)
188 {
189 	struct drm_device *dev = (struct drm_device *) arg;
190 	struct amdgpu_device *adev = drm_to_adev(dev);
191 	irqreturn_t ret;
192 
193 	ret = amdgpu_ih_process(adev, &adev->irq.ih);
194 	if (ret == IRQ_HANDLED)
195 		pm_runtime_mark_last_busy(dev->dev);
196 
	/* For hardware that cannot enable the BIF ring for both the
	 * ras_controller_irq and the ras_err_event_athub_irq IH cookies,
	 * the driver has to poll a status register to check whether the
	 * interrupt has been triggered, and properly ack it if so.
	 */
202 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
203 		if (adev->nbio.ras_funcs &&
204 		    adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
205 			adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
206 
207 		if (adev->nbio.ras_funcs &&
208 		    adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
209 			adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
210 	}
211 
212 	return ret;
213 }
214 
215 /**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing IH ring 1.
221  */
222 static void amdgpu_irq_handle_ih1(struct work_struct *work)
223 {
224 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
225 						  irq.ih1_work);
226 
227 	amdgpu_ih_process(adev, &adev->irq.ih1);
228 }
229 
230 /**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing IH ring 2.
236  */
237 static void amdgpu_irq_handle_ih2(struct work_struct *work)
238 {
239 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
240 						  irq.ih2_work);
241 
242 	amdgpu_ih_process(adev, &adev->irq.ih2);
243 }
244 
245 /**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing the IH soft ring.
251  */
252 static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
253 {
254 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
255 						  irq.ih_soft_work);
256 
257 	amdgpu_ih_process(adev, &adev->irq.ih_soft);
258 }
259 
260 /**
261  * amdgpu_msi_ok - check whether MSI functionality is enabled
262  *
263  * @adev: amdgpu device pointer (unused)
264  *
265  * Checks whether MSI functionality has been disabled via module parameter
266  * (all ASICs).
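 *
 * The decision follows the amdgpu.msi module parameter: -1 (auto, the
 * default), 0 (force off) or 1 (force on), e.g. "modprobe amdgpu msi=0".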
267  *
268  * Returns:
269  * *true* if MSIs are allowed to be enabled or *false* otherwise
270  */
271 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
272 {
273 	if (amdgpu_msi == 1)
274 		return true;
275 	else if (amdgpu_msi == 0)
276 		return false;
277 
278 	return true;
279 }
280 
281 /**
282  * amdgpu_irq_init - initialize interrupt handling
283  *
284  * @adev: amdgpu device pointer
285  *
 * Sets up the work handlers for hotplug and IH ring processing, enables MSI
 * functionality if not disabled by module parameter, and initializes vblank
 * and hotplug interrupt handling.
288  *
289  * Returns:
290  * 0 on success or error code on failure
291  */
292 int amdgpu_irq_init(struct amdgpu_device *adev)
293 {
294 	int r = 0;
295 
296 	spin_lock_init(&adev->irq.lock);
297 
298 	/* Enable MSI if not disabled by module parameter */
299 	adev->irq.msi_enabled = false;
300 
301 	if (amdgpu_msi_ok(adev)) {
302 		int nvec = pci_msix_vec_count(adev->pdev);
303 		unsigned int flags;
304 
		if (nvec <= 0)
			flags = PCI_IRQ_MSI;
		else
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
310 		/* we only need one vector */
311 		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
312 		if (nvec > 0) {
313 			adev->irq.msi_enabled = true;
314 			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
315 		}
316 	}
317 
318 	if (!amdgpu_device_has_dc_support(adev)) {
319 		if (!adev->enable_virtual_display)
320 			/* Disable vblank IRQs aggressively for power-saving */
321 			/* XXX: can this be enabled for DC? */
322 			adev_to_drm(adev)->vblank_disable_immediate = true;
323 
324 		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
325 		if (r)
326 			return r;
327 
328 		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
331 	}
332 
333 	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
334 	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
335 	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
336 
337 	adev->irq.installed = true;
338 	/* Use vector 0 for MSI-X */
339 	r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
340 	if (r) {
341 		adev->irq.installed = false;
342 		if (!amdgpu_device_has_dc_support(adev))
343 			flush_work(&adev->hotplug_work);
344 		return r;
345 	}
346 	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
347 
348 	DRM_DEBUG("amdgpu: irq initialized.\n");
349 	return 0;
350 }
351 
352 
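/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down interrupt handling at the hardware level: uninstalls the DRM
 * IRQ handler, releases the MSI vectors and frees all IH rings.
 */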
353 void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
354 {
355 	if (adev->irq.installed) {
		drm_irq_uninstall(adev_to_drm(adev));
357 		adev->irq.installed = false;
358 		if (adev->irq.msi_enabled)
359 			pci_free_irq_vectors(adev->pdev);
360 
361 		if (!amdgpu_device_has_dc_support(adev))
362 			flush_work(&adev->hotplug_work);
363 	}
364 
365 	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
366 	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
367 	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
368 	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
369 }
370 
371 /**
372  * amdgpu_irq_fini - shut down interrupt handling
373  *
374  * @adev: amdgpu device pointer
375  *
376  * Tears down work functions for hotplug and reset interrupts, disables MSI
377  * functionality, shuts down vblank, hotplug and reset interrupt handling,
378  * turns off interrupts from all sources (all ASICs).
379  */
380 void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
381 {
382 	unsigned i, j;
383 
384 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
385 		if (!adev->irq.client[i].sources)
386 			continue;
387 
388 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
389 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
390 
391 			if (!src)
392 				continue;
393 
394 			kfree(src->enabled_types);
395 			src->enabled_types = NULL;
396 		}
397 		kfree(adev->irq.client[i].sources);
398 		adev->irq.client[i].sources = NULL;
399 	}
400 }
401 
402 /**
403  * amdgpu_irq_add_id - register IRQ source
404  *
405  * @adev: amdgpu device pointer
406  * @client_id: client id
407  * @src_id: source id
408  * @source: IRQ source pointer
409  *
410  * Registers IRQ source on a client.
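 *
 * A minimal registration sketch, e.g. from an IP block's sw_init callback;
 * the IP block, source id and callbacks are illustrative::
 *
 *	static const struct amdgpu_irq_src_funcs my_irq_funcs = {
 *		.set = my_irq_set,
 *		.process = my_irq_process,
 *	};
 *
 *	adev->my_block.irq.num_types = 1;
 *	adev->my_block.irq.funcs = &my_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, my_src_id,
 *			      &adev->my_block.irq);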
411  *
412  * Returns:
413  * 0 on success or error code otherwise
414  */
415 int amdgpu_irq_add_id(struct amdgpu_device *adev,
416 		      unsigned client_id, unsigned src_id,
417 		      struct amdgpu_irq_src *source)
418 {
419 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
420 		return -EINVAL;
421 
422 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
423 		return -EINVAL;
424 
425 	if (!source->funcs)
426 		return -EINVAL;
427 
428 	if (!adev->irq.client[client_id].sources) {
429 		adev->irq.client[client_id].sources =
430 			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
431 				sizeof(struct amdgpu_irq_src *),
432 				GFP_KERNEL);
433 		if (!adev->irq.client[client_id].sources)
434 			return -ENOMEM;
435 	}
436 
437 	if (adev->irq.client[client_id].sources[src_id] != NULL)
438 		return -EINVAL;
439 
440 	if (source->num_types && !source->enabled_types) {
441 		atomic_t *types;
442 
443 		types = kcalloc(source->num_types, sizeof(atomic_t),
444 				GFP_KERNEL);
445 		if (!types)
446 			return -ENOMEM;
447 
448 		source->enabled_types = types;
449 	}
450 
451 	adev->irq.client[client_id].sources[src_id] = source;
452 	return 0;
453 }
454 
455 /**
456  * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
457  *
458  * @adev: amdgpu device pointer
459  * @ih: interrupt ring instance
460  *
461  * Dispatches IRQ to IP blocks.
462  */
463 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
464 			 struct amdgpu_ih_ring *ih)
465 {
466 	u32 ring_index = ih->rptr >> 2;
467 	struct amdgpu_iv_entry entry;
468 	unsigned client_id, src_id;
469 	struct amdgpu_irq_src *src;
470 	bool handled = false;
471 	int r;
472 
473 	entry.ih = ih;
474 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
475 	amdgpu_ih_decode_iv(adev, &entry);
476 
477 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
478 
479 	client_id = entry.client_id;
480 	src_id = entry.src_id;
481 
482 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
483 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
484 
	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
486 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
487 
488 	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
489 		   adev->irq.virq[src_id]) {
490 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
491 
492 	} else if (!adev->irq.client[client_id].sources) {
493 		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
494 			  client_id, src_id);
495 
496 	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
497 		r = src->funcs->process(adev, src, &entry);
498 		if (r < 0)
499 			DRM_ERROR("error processing interrupt (%d)\n", r);
500 		else if (r)
501 			handled = true;
502 
503 	} else {
504 		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
505 	}
506 
507 	/* Send it to amdkfd as well if it isn't already handled */
508 	if (!handled)
509 		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
510 }
511 
512 /**
513  * amdgpu_irq_delegate - delegate IV to soft IH ring
514  *
515  * @adev: amdgpu device pointer
516  * @entry: IV entry
517  * @num_dw: size of IV
518  *
519  * Delegate the IV to the soft IH ring and schedule processing of it. Used
520  * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
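 *
 * Typically called from a source's process callback when an IV needs
 * process-context handling; a minimal sketch (the dword count shown is
 * illustrative)::
 *
 *	amdgpu_irq_delegate(adev, entry, 32);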
521  */
522 void amdgpu_irq_delegate(struct amdgpu_device *adev,
523 			 struct amdgpu_iv_entry *entry,
524 			 unsigned int num_dw)
525 {
526 	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
527 	schedule_work(&adev->irq.ih_soft_work);
528 }
529 
530 /**
531  * amdgpu_irq_update - update hardware interrupt state
532  *
533  * @adev: amdgpu device pointer
534  * @src: interrupt source pointer
535  * @type: type of interrupt
536  *
 * Updates the interrupt state for the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
541 {
542 	unsigned long irqflags;
543 	enum amdgpu_interrupt_state state;
544 	int r;
545 
546 	spin_lock_irqsave(&adev->irq.lock, irqflags);
547 
	/* We need to determine the state after taking the lock,
	 * otherwise we might disable interrupts that were just enabled.
	 */
550 	if (amdgpu_irq_enabled(adev, src, type))
551 		state = AMDGPU_IRQ_STATE_ENABLE;
552 	else
553 		state = AMDGPU_IRQ_STATE_DISABLE;
554 
555 	r = src->funcs->set(adev, src, type, state);
556 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
557 	return r;
558 }
559 
560 /**
561  * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
562  *
563  * @adev: amdgpu device pointer
564  *
565  * Updates state of all types of interrupts on all sources on resume after
566  * reset.
567  */
568 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
569 {
570 	int i, j, k;
571 
572 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
573 		if (!adev->irq.client[i].sources)
574 			continue;
575 
576 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
577 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
578 
579 			if (!src || !src->funcs || !src->funcs->set)
580 				continue;
581 			for (k = 0; k < src->num_types; k++)
582 				amdgpu_irq_update(adev, src, k);
583 		}
584 	}
585 }
586 
587 /**
588  * amdgpu_irq_get - enable interrupt
589  *
590  * @adev: amdgpu device pointer
591  * @src: interrupt source pointer
592  * @type: type of interrupt
593  *
 * Enables the specified type of interrupt on the specified source (all ASICs).
595  *
596  * Returns:
597  * 0 on success or error code otherwise
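 *
 * Enables are reference counted; each amdgpu_irq_get() should be balanced
 * by a matching amdgpu_irq_put(). A usage sketch (source and type are
 * illustrative)::
 *
 *	r = amdgpu_irq_get(adev, &adev->gfx.eop_irq, 0);
 *	if (r)
 *		return r;
 *	// ... interrupt stays enabled while in use ...
 *	amdgpu_irq_put(adev, &adev->gfx.eop_irq, 0);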
598  */
599 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
600 		   unsigned type)
601 {
602 	if (!adev_to_drm(adev)->irq_enabled)
603 		return -ENOENT;
604 
605 	if (type >= src->num_types)
606 		return -EINVAL;
607 
608 	if (!src->enabled_types || !src->funcs->set)
609 		return -EINVAL;
610 
611 	if (atomic_inc_return(&src->enabled_types[type]) == 1)
612 		return amdgpu_irq_update(adev, src, type);
613 
614 	return 0;
615 }
616 
617 /**
618  * amdgpu_irq_put - disable interrupt
619  *
620  * @adev: amdgpu device pointer
621  * @src: interrupt source pointer
622  * @type: type of interrupt
623  *
 * Disables the specified type of interrupt on the specified source (all ASICs).
625  *
626  * Returns:
627  * 0 on success or error code otherwise
628  */
629 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
630 		   unsigned type)
631 {
632 	if (!adev_to_drm(adev)->irq_enabled)
633 		return -ENOENT;
634 
635 	if (type >= src->num_types)
636 		return -EINVAL;
637 
638 	if (!src->enabled_types || !src->funcs->set)
639 		return -EINVAL;
640 
641 	if (atomic_dec_and_test(&src->enabled_types[type]))
642 		return amdgpu_irq_update(adev, src, type);
643 
644 	return 0;
645 }
646 
647 /**
648  * amdgpu_irq_enabled - check whether interrupt is enabled or not
649  *
650  * @adev: amdgpu device pointer
651  * @src: interrupt source pointer
652  * @type: type of interrupt
653  *
654  * Checks whether the given type of interrupt is enabled on the given source.
655  *
656  * Returns:
657  * *true* if interrupt is enabled, *false* if interrupt is disabled or on
658  * invalid parameters
659  */
660 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
661 			unsigned type)
662 {
663 	if (!adev_to_drm(adev)->irq_enabled)
664 		return false;
665 
666 	if (type >= src->num_types)
667 		return false;
668 
669 	if (!src->enabled_types || !src->funcs->set)
670 		return false;
671 
672 	return !!atomic_read(&src->enabled_types[type]);
673 }
674 
675 /* XXX: Generic IRQ handling */
676 static void amdgpu_irq_mask(struct irq_data *irqd)
677 {
678 	/* XXX */
679 }
680 
681 static void amdgpu_irq_unmask(struct irq_data *irqd)
682 {
683 	/* XXX */
684 }
685 
686 /* amdgpu hardware interrupt chip descriptor */
687 static struct irq_chip amdgpu_irq_chip = {
688 	.name = "amdgpu-ih",
689 	.irq_mask = amdgpu_irq_mask,
690 	.irq_unmask = amdgpu_irq_unmask,
691 };
692 
693 /**
694  * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
695  *
696  * @d: amdgpu IRQ domain pointer (unused)
697  * @irq: virtual IRQ number
698  * @hwirq: hardware irq number
699  *
 * The current implementation assigns a simple interrupt handler to the
 * given virtual IRQ.
702  *
703  * Returns:
704  * 0 on success or error code otherwise
705  */
706 static int amdgpu_irqdomain_map(struct irq_domain *d,
707 				unsigned int irq, irq_hw_number_t hwirq)
708 {
709 	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
710 		return -EPERM;
711 
712 	irq_set_chip_and_handler(irq,
713 				 &amdgpu_irq_chip, handle_simple_irq);
714 	return 0;
715 }
716 
717 /* Implementation of methods for amdgpu IRQ domain */
718 static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
719 	.map = amdgpu_irqdomain_map,
720 };
721 
722 /**
723  * amdgpu_irq_add_domain - create a linear IRQ domain
724  *
725  * @adev: amdgpu device pointer
726  *
727  * Creates an IRQ domain for GPU interrupt sources
728  * that may be driven by another driver (e.g., ACP).
729  *
730  * Returns:
731  * 0 on success or error code otherwise
732  */
733 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
734 {
735 	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
736 						 &amdgpu_hw_irqdomain_ops, adev);
737 	if (!adev->irq.domain) {
738 		DRM_ERROR("GPU irq add domain failed\n");
739 		return -ENODEV;
740 	}
741 
742 	return 0;
743 }
744 
745 /**
746  * amdgpu_irq_remove_domain - remove the IRQ domain
747  *
748  * @adev: amdgpu device pointer
749  *
750  * Removes the IRQ domain for GPU interrupt sources
751  * that may be driven by another driver (e.g., ACP).
752  */
753 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
754 {
755 	if (adev->irq.domain) {
756 		irq_domain_remove(adev->irq.domain);
757 		adev->irq.domain = NULL;
758 	}
759 }
760 
761 /**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
763  *
764  * @adev: amdgpu device pointer
765  * @src_id: IH source id
766  *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
769  * by a different driver (e.g., ACP).
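 *
 * A consumption sketch for the returned Linux IRQ; the source id, handler
 * and device pointers are illustrative::
 *
 *	unsigned int virq = amdgpu_irq_create_mapping(adev, my_src_id);
 *
 *	r = devm_request_irq(dev, virq, my_handler, 0, "my-block", my_data);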
770  *
771  * Returns:
772  * Linux IRQ
773  */
774 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
775 {
776 	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
777 
778 	return adev->irq.virq[src_id];
779 }
780