/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by the module parameter, MSI support
 * will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */
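
/*
 * Example (sketch, not taken from any specific IP block): a typical user of
 * this file registers a struct amdgpu_irq_src during IP sw_init and then
 * enables or disables it at runtime.  my_irq_set_state(), my_irq_process()
 * and src_id below are placeholders:
 *
 *	static const struct amdgpu_irq_src_funcs my_irq_funcs = {
 *		.set = my_irq_set_state,
 *		.process = my_irq_process,
 *	};
 *
 *	source->num_types = 1;
 *	source->funcs = &my_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, src_id, source);
 *
 * amdgpu_irq_get() then enables one type of interrupt on the source and
 * amdgpu_irq_put() drops that reference again; both are defined below.
 */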

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

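/**
 * amdgpu_restore_msix - re-enable MSI-X after a reset
 *
 * @adev: amdgpu device pointer
 *
 * If MSI-X is currently enabled in the PCI config space, toggle the MSI-X
 * enable bit off and back on to restore it, e.g. after a VF FLR.
 */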
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Enables MSI functionality if it is not disabled by module parameter,
 * sets up the work handlers for the IH rings, and installs the interrupt
 * handler (all ASICs).
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}
		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r)
		return r;
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

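/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed interrupt handler, releases the allocated MSI/MSI-X
 * vectors and tears down all IH rings (all ASICs).
 */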
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - shut down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the software state of all registered IRQ sources: releases the
 * per-type enable counters and the per-client source tables (all ASICs).
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		generic_handle_domain_irq(adev->irq.domain, src_id);

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}

/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine after taking the lock, otherwise
	 * we might disable just enabled interrupts again
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * Current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
}

/**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
}
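
/*
 * Example (sketch): a client driver that owns one of these sources, such as
 * ACP, would typically map its IH src id once and then use the normal Linux
 * IRQ API on the returned number, roughly:
 *
 *	unsigned int irq = amdgpu_irq_create_mapping(adev, my_src_id);
 *	r = request_irq(irq, my_handler, 0, "my-client", my_data);
 *
 * my_src_id, my_handler and my_data are placeholders for the client
 * driver's own source id, handler and context.
 */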