1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 /**
30  * DOC: Interrupt Handling
31  *
32  * Interrupts generated within GPU hardware raise interrupt requests that are
33  * passed to amdgpu IRQ handler which is responsible for detecting source and
34  * type of the interrupt and dispatching matching handlers. If handling an
35  * interrupt requires calling kernel functions that may sleep processing is
36  * dispatched to work handlers.
37  *
38  * If MSI functionality is not disabled by module parameter then MSI
39  * support will be enabled.
40  *
41  * For GPU interrupt sources that may be driven by another driver, IRQ domain
42  * support is used (with mapping between virtual and hardware IRQs).
43  */
44 
45 #include <linux/irq.h>
46 #include <linux/pci.h>
47 
48 #include <drm/drm_vblank.h>
49 #include <drm/amdgpu_drm.h>
50 #include <drm/drm_drv.h>
51 #include "amdgpu.h"
52 #include "amdgpu_ih.h"
53 #include "atom.h"
54 #include "amdgpu_connectors.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_ras.h"
58 
59 #include <linux/pm_runtime.h>
60 
61 #ifdef CONFIG_DRM_AMD_DC
62 #include "amdgpu_dm_irq.h"
63 #endif
64 
65 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
66 
/*
 * Human-readable names for the SOC15 IH client ids, indexed by client id as
 * reported in interrupt vectors. Entries with "X or Y" cover ids that are
 * shared between different IP blocks depending on ASIC — presumably resolved
 * by the ASIC variant; confirm against the SOC15 client id enum.
 */
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
101 
/*
 * Map from logical node ids (AID/XCD) to physical instance indices.
 * NOTE(review): AID and XCD entries intentionally share physical indices
 * (e.g. AID1 and XCD1 both map to 1) — they index different partitions;
 * confirm against the NODEID definitions before changing.
 */
const int node_id_to_phys_map[NODEID_MAX] = {
	[AID0_NODEID] = 0,
	[XCD0_NODEID] = 0,
	[XCD1_NODEID] = 1,
	[AID1_NODEID] = 1,
	[XCD2_NODEID] = 2,
	[XCD3_NODEID] = 3,
	[AID2_NODEID] = 2,
	[XCD4_NODEID] = 4,
	[XCD5_NODEID] = 5,
	[AID3_NODEID] = 3,
	[XCD6_NODEID] = 6,
	[XCD7_NODEID] = 7,
};
116 
117 /**
118  * amdgpu_irq_disable_all - disable *all* interrupts
119  *
120  * @adev: amdgpu device pointer
121  *
122  * Disable all types of interrupts from all sources.
123  */
124 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
125 {
126 	unsigned long irqflags;
127 	unsigned i, j, k;
128 	int r;
129 
130 	spin_lock_irqsave(&adev->irq.lock, irqflags);
131 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
132 		if (!adev->irq.client[i].sources)
133 			continue;
134 
135 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
136 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
137 
138 			if (!src || !src->funcs->set || !src->num_types)
139 				continue;
140 
141 			for (k = 0; k < src->num_types; ++k) {
142 				atomic_set(&src->enabled_types[k], 0);
143 				r = src->funcs->set(adev, src, k,
144 						    AMDGPU_IRQ_STATE_DISABLE);
145 				if (r)
146 					DRM_ERROR("error disabling interrupt (%d)\n",
147 						  r);
148 			}
149 		}
150 	}
151 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
152 }
153 
154 /**
155  * amdgpu_irq_handler - IRQ handler
156  *
157  * @irq: IRQ number (unused)
158  * @arg: pointer to DRM device
159  *
160  * IRQ handler for amdgpu driver (all ASICs).
161  *
162  * Returns:
163  * result of handling the IRQ, as defined by &irqreturn_t
164  */
165 static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
166 {
167 	struct drm_device *dev = (struct drm_device *) arg;
168 	struct amdgpu_device *adev = drm_to_adev(dev);
169 	irqreturn_t ret;
170 
171 	ret = amdgpu_ih_process(adev, &adev->irq.ih);
172 	if (ret == IRQ_HANDLED)
173 		pm_runtime_mark_last_busy(dev->dev);
174 
175 	amdgpu_ras_interrupt_fatal_error_handler(adev);
176 
177 	return ret;
178 }
179 
180 /**
181  * amdgpu_irq_handle_ih1 - kick of processing for IH1
182  *
183  * @work: work structure in struct amdgpu_irq
184  *
185  * Kick of processing IH ring 1.
186  */
187 static void amdgpu_irq_handle_ih1(struct work_struct *work)
188 {
189 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
190 						  irq.ih1_work);
191 
192 	amdgpu_ih_process(adev, &adev->irq.ih1);
193 }
194 
195 /**
196  * amdgpu_irq_handle_ih2 - kick of processing for IH2
197  *
198  * @work: work structure in struct amdgpu_irq
199  *
200  * Kick of processing IH ring 2.
201  */
202 static void amdgpu_irq_handle_ih2(struct work_struct *work)
203 {
204 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
205 						  irq.ih2_work);
206 
207 	amdgpu_ih_process(adev, &adev->irq.ih2);
208 }
209 
210 /**
211  * amdgpu_irq_handle_ih_soft - kick of processing for ih_soft
212  *
213  * @work: work structure in struct amdgpu_irq
214  *
215  * Kick of processing IH soft ring.
216  */
217 static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
218 {
219 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
220 						  irq.ih_soft_work);
221 
222 	amdgpu_ih_process(adev, &adev->irq.ih_soft);
223 }
224 
225 /**
226  * amdgpu_msi_ok - check whether MSI functionality is enabled
227  *
228  * @adev: amdgpu device pointer (unused)
229  *
230  * Checks whether MSI functionality has been disabled via module parameter
231  * (all ASICs).
232  *
233  * Returns:
234  * *true* if MSIs are allowed to be enabled or *false* otherwise
235  */
236 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
237 {
238 	if (amdgpu_msi == 1)
239 		return true;
240 	else if (amdgpu_msi == 0)
241 		return false;
242 
243 	return true;
244 }
245 
/*
 * amdgpu_restore_msix - force the hardware to re-latch MSI-X state
 *
 * @adev: amdgpu device pointer
 *
 * If MSI-X is currently enabled in the PCI capability, toggle the enable
 * bit off and back on. No-op when MSI-X is not enabled. Used after a VF
 * function level reset (see amdgpu_irq_gpu_reset_resume_helper()).
 */
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR: disable then re-enable so the device re-reads the MSI-X config */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
260 
261 /**
262  * amdgpu_irq_init - initialize interrupt handling
263  *
264  * @adev: amdgpu device pointer
265  *
266  * Sets up work functions for hotplug and reset interrupts, enables MSI
267  * functionality, initializes vblank, hotplug and reset interrupt handling.
268  *
269  * Returns:
270  * 0 on success or error code on failure
271  */
272 int amdgpu_irq_init(struct amdgpu_device *adev)
273 {
274 	int r = 0;
275 	unsigned int irq;
276 
277 	spin_lock_init(&adev->irq.lock);
278 
279 	/* Enable MSI if not disabled by module parameter */
280 	adev->irq.msi_enabled = false;
281 
282 	if (amdgpu_msi_ok(adev)) {
283 		int nvec = pci_msix_vec_count(adev->pdev);
284 		unsigned int flags;
285 
286 		if (nvec <= 0) {
287 			flags = PCI_IRQ_MSI;
288 		} else {
289 			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
290 		}
291 		/* we only need one vector */
292 		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
293 		if (nvec > 0) {
294 			adev->irq.msi_enabled = true;
295 			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
296 		}
297 	}
298 
299 	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
300 	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
301 	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
302 
303 	/* Use vector 0 for MSI-X. */
304 	r = pci_irq_vector(adev->pdev, 0);
305 	if (r < 0)
306 		return r;
307 	irq = r;
308 
309 	/* PCI devices require shared interrupts. */
310 	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
311 			adev_to_drm(adev));
312 	if (r)
313 		return r;
314 	adev->irq.installed = true;
315 	adev->irq.irq = irq;
316 	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
317 
318 	DRM_DEBUG("amdgpu: irq initialized.\n");
319 	return 0;
320 }
321 
322 
/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed IRQ handler and any allocated MSI/MSI-X vectors,
 * then tears down all IH rings (soft, main, IH1 and IH2).
 */
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}
337 
338 /**
339  * amdgpu_irq_fini_sw - shut down interrupt handling
340  *
341  * @adev: amdgpu device pointer
342  *
343  * Tears down work functions for hotplug and reset interrupts, disables MSI
344  * functionality, shuts down vblank, hotplug and reset interrupt handling,
345  * turns off interrupts from all sources (all ASICs).
346  */
347 void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
348 {
349 	unsigned i, j;
350 
351 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
352 		if (!adev->irq.client[i].sources)
353 			continue;
354 
355 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
356 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
357 
358 			if (!src)
359 				continue;
360 
361 			kfree(src->enabled_types);
362 			src->enabled_types = NULL;
363 		}
364 		kfree(adev->irq.client[i].sources);
365 		adev->irq.client[i].sources = NULL;
366 	}
367 }
368 
369 /**
370  * amdgpu_irq_add_id - register IRQ source
371  *
372  * @adev: amdgpu device pointer
373  * @client_id: client id
374  * @src_id: source id
375  * @source: IRQ source pointer
376  *
377  * Registers IRQ source on a client.
378  *
379  * Returns:
380  * 0 on success or error code otherwise
381  */
382 int amdgpu_irq_add_id(struct amdgpu_device *adev,
383 		      unsigned client_id, unsigned src_id,
384 		      struct amdgpu_irq_src *source)
385 {
386 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
387 		return -EINVAL;
388 
389 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
390 		return -EINVAL;
391 
392 	if (!source->funcs)
393 		return -EINVAL;
394 
395 	if (!adev->irq.client[client_id].sources) {
396 		adev->irq.client[client_id].sources =
397 			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
398 				sizeof(struct amdgpu_irq_src *),
399 				GFP_KERNEL);
400 		if (!adev->irq.client[client_id].sources)
401 			return -ENOMEM;
402 	}
403 
404 	if (adev->irq.client[client_id].sources[src_id] != NULL)
405 		return -EINVAL;
406 
407 	if (source->num_types && !source->enabled_types) {
408 		atomic_t *types;
409 
410 		types = kcalloc(source->num_types, sizeof(atomic_t),
411 				GFP_KERNEL);
412 		if (!types)
413 			return -ENOMEM;
414 
415 		source->enabled_types = types;
416 	}
417 
418 	adev->irq.client[client_id].sources[src_id] = source;
419 	return 0;
420 }
421 
422 /**
423  * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
424  *
425  * @adev: amdgpu device pointer
426  * @ih: interrupt ring instance
427  *
428  * Dispatches IRQ to IP blocks.
429  */
430 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
431 			 struct amdgpu_ih_ring *ih)
432 {
433 	u32 ring_index = ih->rptr >> 2;
434 	struct amdgpu_iv_entry entry;
435 	unsigned client_id, src_id;
436 	struct amdgpu_irq_src *src;
437 	bool handled = false;
438 	int r;
439 
440 	entry.ih = ih;
441 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
442 	amdgpu_ih_decode_iv(adev, &entry);
443 
444 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
445 
446 	client_id = entry.client_id;
447 	src_id = entry.src_id;
448 
449 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
450 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
451 
452 	} else	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
453 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
454 
455 	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
456 		   adev->irq.virq[src_id]) {
457 		generic_handle_domain_irq(adev->irq.domain, src_id);
458 
459 	} else if (!adev->irq.client[client_id].sources) {
460 		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
461 			  client_id, src_id);
462 
463 	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
464 		r = src->funcs->process(adev, src, &entry);
465 		if (r < 0)
466 			DRM_ERROR("error processing interrupt (%d)\n", r);
467 		else if (r)
468 			handled = true;
469 
470 	} else {
471 		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
472 	}
473 
474 	/* Send it to amdkfd as well if it isn't already handled */
475 	if (!handled)
476 		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
477 
478 	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
479 		ih->processed_timestamp = entry.timestamp;
480 }
481 
482 /**
483  * amdgpu_irq_delegate - delegate IV to soft IH ring
484  *
485  * @adev: amdgpu device pointer
486  * @entry: IV entry
487  * @num_dw: size of IV
488  *
489  * Delegate the IV to the soft IH ring and schedule processing of it. Used
490  * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
491  */
492 void amdgpu_irq_delegate(struct amdgpu_device *adev,
493 			 struct amdgpu_iv_entry *entry,
494 			 unsigned int num_dw)
495 {
496 	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
497 	schedule_work(&adev->irq.ih_soft_work);
498 }
499 
500 /**
501  * amdgpu_irq_update - update hardware interrupt state
502  *
503  * @adev: amdgpu device pointer
504  * @src: interrupt source pointer
505  * @type: type of interrupt
506  *
507  * Updates interrupt state for the specific source (all ASICs).
508  */
509 int amdgpu_irq_update(struct amdgpu_device *adev,
510 			     struct amdgpu_irq_src *src, unsigned type)
511 {
512 	unsigned long irqflags;
513 	enum amdgpu_interrupt_state state;
514 	int r;
515 
516 	spin_lock_irqsave(&adev->irq.lock, irqflags);
517 
518 	/* We need to determine after taking the lock, otherwise
519 	   we might disable just enabled interrupts again */
520 	if (amdgpu_irq_enabled(adev, src, type))
521 		state = AMDGPU_IRQ_STATE_ENABLE;
522 	else
523 		state = AMDGPU_IRQ_STATE_DISABLE;
524 
525 	r = src->funcs->set(adev, src, type, state);
526 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
527 	return r;
528 }
529 
530 /**
531  * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
532  *
533  * @adev: amdgpu device pointer
534  *
535  * Updates state of all types of interrupts on all sources on resume after
536  * reset.
537  */
538 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
539 {
540 	int i, j, k;
541 
542 	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
543 		amdgpu_restore_msix(adev);
544 
545 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
546 		if (!adev->irq.client[i].sources)
547 			continue;
548 
549 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
550 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
551 
552 			if (!src || !src->funcs || !src->funcs->set)
553 				continue;
554 			for (k = 0; k < src->num_types; k++)
555 				amdgpu_irq_update(adev, src, k);
556 		}
557 	}
558 }
559 
560 /**
561  * amdgpu_irq_get - enable interrupt
562  *
563  * @adev: amdgpu device pointer
564  * @src: interrupt source pointer
565  * @type: type of interrupt
566  *
567  * Enables specified type of interrupt on the specified source (all ASICs).
568  *
569  * Returns:
570  * 0 on success or error code otherwise
571  */
572 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
573 		   unsigned type)
574 {
575 	if (!adev->irq.installed)
576 		return -ENOENT;
577 
578 	if (type >= src->num_types)
579 		return -EINVAL;
580 
581 	if (!src->enabled_types || !src->funcs->set)
582 		return -EINVAL;
583 
584 	if (atomic_inc_return(&src->enabled_types[type]) == 1)
585 		return amdgpu_irq_update(adev, src, type);
586 
587 	return 0;
588 }
589 
590 /**
591  * amdgpu_irq_put - disable interrupt
592  *
593  * @adev: amdgpu device pointer
594  * @src: interrupt source pointer
595  * @type: type of interrupt
596  *
597  * Enables specified type of interrupt on the specified source (all ASICs).
598  *
599  * Returns:
600  * 0 on success or error code otherwise
601  */
602 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
603 		   unsigned type)
604 {
605 	if (!adev->irq.installed)
606 		return -ENOENT;
607 
608 	if (type >= src->num_types)
609 		return -EINVAL;
610 
611 	if (!src->enabled_types || !src->funcs->set)
612 		return -EINVAL;
613 
614 	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
615 		return -EINVAL;
616 
617 	if (atomic_dec_and_test(&src->enabled_types[type]))
618 		return amdgpu_irq_update(adev, src, type);
619 
620 	return 0;
621 }
622 
623 /**
624  * amdgpu_irq_enabled - check whether interrupt is enabled or not
625  *
626  * @adev: amdgpu device pointer
627  * @src: interrupt source pointer
628  * @type: type of interrupt
629  *
630  * Checks whether the given type of interrupt is enabled on the given source.
631  *
632  * Returns:
633  * *true* if interrupt is enabled, *false* if interrupt is disabled or on
634  * invalid parameters
635  */
636 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
637 			unsigned type)
638 {
639 	if (!adev->irq.installed)
640 		return false;
641 
642 	if (type >= src->num_types)
643 		return false;
644 
645 	if (!src->enabled_types || !src->funcs->set)
646 		return false;
647 
648 	return !!atomic_read(&src->enabled_types[type]);
649 }
650 
/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX: no-op placeholder; masking is not implemented for domain IRQs */
}
656 
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX: no-op placeholder; unmasking is not implemented for domain IRQs */
}
661 
/* amdgpu hardware interrupt chip descriptor; mask/unmask are stubs (see above) */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
668 
669 /**
670  * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
671  *
672  * @d: amdgpu IRQ domain pointer (unused)
673  * @irq: virtual IRQ number
674  * @hwirq: hardware irq number
675  *
676  * Current implementation assigns simple interrupt handler to the given virtual
677  * IRQ.
678  *
679  * Returns:
680  * 0 on success or error code otherwise
681  */
682 static int amdgpu_irqdomain_map(struct irq_domain *d,
683 				unsigned int irq, irq_hw_number_t hwirq)
684 {
685 	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
686 		return -EPERM;
687 
688 	irq_set_chip_and_handler(irq,
689 				 &amdgpu_irq_chip, handle_simple_irq);
690 	return 0;
691 }
692 
/* Implementation of methods for amdgpu IRQ domain (only .map is provided) */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
697 
698 /**
699  * amdgpu_irq_add_domain - create a linear IRQ domain
700  *
701  * @adev: amdgpu device pointer
702  *
703  * Creates an IRQ domain for GPU interrupt sources
704  * that may be driven by another driver (e.g., ACP).
705  *
706  * Returns:
707  * 0 on success or error code otherwise
708  */
709 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
710 {
711 	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
712 						 &amdgpu_hw_irqdomain_ops, adev);
713 	if (!adev->irq.domain) {
714 		DRM_ERROR("GPU irq add domain failed\n");
715 		return -ENODEV;
716 	}
717 
718 	return 0;
719 }
720 
721 /**
722  * amdgpu_irq_remove_domain - remove the IRQ domain
723  *
724  * @adev: amdgpu device pointer
725  *
726  * Removes the IRQ domain for GPU interrupt sources
727  * that may be driven by another driver (e.g., ACP).
728  */
729 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
730 {
731 	if (adev->irq.domain) {
732 		irq_domain_remove(adev->irq.domain);
733 		adev->irq.domain = NULL;
734 	}
735 }
736 
737 /**
738  * amdgpu_irq_create_mapping - create mapping between domain Linux IRQs
739  *
740  * @adev: amdgpu device pointer
741  * @src_id: IH source id
742  *
743  * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
744  * Use this for components that generate a GPU interrupt, but are driven
745  * by a different driver (e.g., ACP).
746  *
747  * Returns:
748  * Linux IRQ
749  */
750 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
751 {
752 	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
753 
754 	return adev->irq.virq[src_id];
755 }
756