1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/irq.h>
29 #include <drm/drmP.h>
30 #include <drm/drm_crtc_helper.h>
31 #include <drm/amdgpu_drm.h>
32 #include "amdgpu.h"
33 #include "amdgpu_ih.h"
34 #include "atom.h"
35 #include "amdgpu_connectors.h"
36 #include "amdgpu_trace.h"
37 
38 #include <linux/pm_runtime.h>
39 
40 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
41 
42 /*
43  * Handle hotplug events outside the interrupt handler proper.
44  */
45 /**
46  * amdgpu_hotplug_work_func - display hotplug work handler
47  *
48  * @work: work struct
49  *
50  * This is the hot plug event work handler (all asics).
51  * The work gets scheduled from the irq handler if there
52  * was a hot plug interrupt.  It walks the connector table
53  * and calls the hotplug handler for each one, then sends
54  * a drm hotplug event to alert userspace.
55  */
56 static void amdgpu_hotplug_work_func(struct work_struct *work)
57 {
58 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
59 						  hotplug_work);
60 	struct drm_device *dev = adev->ddev;
61 	struct drm_mode_config *mode_config = &dev->mode_config;
62 	struct drm_connector *connector;
63 
64 	mutex_lock(&mode_config->mutex);
65 	list_for_each_entry(connector, &mode_config->connector_list, head)
66 		amdgpu_connector_hotplug(connector);
67 	mutex_unlock(&mode_config->mutex);
68 	/* Just fire off a uevent and let userspace tell us what to do */
69 	drm_helper_hpd_irq_event(dev);
70 }
71 
72 /**
73  * amdgpu_irq_reset_work_func - execute gpu reset
74  *
75  * @work: work struct
76  *
77  * Execute scheduled gpu reset (cayman+).
78  * This function is called when the irq handler
79  * thinks we need a gpu reset.
80  */
81 static void amdgpu_irq_reset_work_func(struct work_struct *work)
82 {
83 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
84 						  reset_work);
85 
86 	if (!amdgpu_sriov_vf(adev))
87 		amdgpu_gpu_reset(adev);
88 }
89 
90 /* Disable *all* interrupts */
91 static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
92 {
93 	unsigned long irqflags;
94 	unsigned i, j, k;
95 	int r;
96 
97 	spin_lock_irqsave(&adev->irq.lock, irqflags);
98 	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
99 		if (!adev->irq.client[i].sources)
100 			continue;
101 
102 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
103 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
104 
105 			if (!src || !src->funcs->set || !src->num_types)
106 				continue;
107 
108 			for (k = 0; k < src->num_types; ++k) {
109 				atomic_set(&src->enabled_types[k], 0);
110 				r = src->funcs->set(adev, src, k,
111 						    AMDGPU_IRQ_STATE_DISABLE);
112 				if (r)
113 					DRM_ERROR("error disabling interrupt (%d)\n",
114 						  r);
115 			}
116 		}
117 	}
118 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
119 }
120 
121 /**
122  * amdgpu_irq_preinstall - drm irq preinstall callback
123  *
124  * @dev: drm dev pointer
125  *
126  * Gets the hw ready to enable irqs (all asics).
127  * This function disables all interrupt sources on the GPU.
128  */
129 void amdgpu_irq_preinstall(struct drm_device *dev)
130 {
131 	struct amdgpu_device *adev = dev->dev_private;
132 
133 	/* Disable *all* interrupts */
134 	amdgpu_irq_disable_all(adev);
135 	/* Clear bits */
136 	amdgpu_ih_process(adev);
137 }
138 
139 /**
140  * amdgpu_irq_postinstall - drm irq preinstall callback
141  *
142  * @dev: drm dev pointer
143  *
144  * Handles stuff to be done after enabling irqs (all asics).
145  * Returns 0 on success.
146  */
147 int amdgpu_irq_postinstall(struct drm_device *dev)
148 {
149 	dev->max_vblank_count = 0x00ffffff;
150 	return 0;
151 }
152 
153 /**
154  * amdgpu_irq_uninstall - drm irq uninstall callback
155  *
156  * @dev: drm dev pointer
157  *
158  * This function disables all interrupt sources on the GPU (all asics).
159  */
160 void amdgpu_irq_uninstall(struct drm_device *dev)
161 {
162 	struct amdgpu_device *adev = dev->dev_private;
163 
164 	if (adev == NULL) {
165 		return;
166 	}
167 	amdgpu_irq_disable_all(adev);
168 }
169 
170 /**
171  * amdgpu_irq_handler - irq handler
172  *
173  * @int irq, void *arg: args
174  *
175  * This is the irq handler for the amdgpu driver (all asics).
176  */
177 irqreturn_t amdgpu_irq_handler(int irq, void *arg)
178 {
179 	struct drm_device *dev = (struct drm_device *) arg;
180 	struct amdgpu_device *adev = dev->dev_private;
181 	irqreturn_t ret;
182 
183 	ret = amdgpu_ih_process(adev);
184 	if (ret == IRQ_HANDLED)
185 		pm_runtime_mark_last_busy(dev->dev);
186 	return ret;
187 }
188 
189 /**
190  * amdgpu_msi_ok - asic specific msi checks
191  *
192  * @adev: amdgpu device pointer
193  *
194  * Handles asic specific MSI checks to determine if
195  * MSIs should be enabled on a particular chip (all asics).
196  * Returns true if MSIs should be enabled, false if MSIs
197  * should not be enabled.
198  */
199 static bool amdgpu_msi_ok(struct amdgpu_device *adev)
200 {
201 	/* force MSI on */
202 	if (amdgpu_msi == 1)
203 		return true;
204 	else if (amdgpu_msi == 0)
205 		return false;
206 
207 	return true;
208 }
209 
210 /**
211  * amdgpu_irq_init - init driver interrupt info
212  *
213  * @adev: amdgpu device pointer
214  *
215  * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
216  * Returns 0 for success, error for failure.
217  */
218 int amdgpu_irq_init(struct amdgpu_device *adev)
219 {
220 	int r = 0;
221 
222 	spin_lock_init(&adev->irq.lock);
223 
224 	if (!adev->enable_virtual_display)
225 		/* Disable vblank irqs aggressively for power-saving */
226 		adev->ddev->vblank_disable_immediate = true;
227 
228 	r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
229 	if (r) {
230 		return r;
231 	}
232 
233 	/* enable msi */
234 	adev->irq.msi_enabled = false;
235 
236 	if (amdgpu_msi_ok(adev)) {
237 		int ret = pci_enable_msi(adev->pdev);
238 		if (!ret) {
239 			adev->irq.msi_enabled = true;
240 			dev_info(adev->dev, "amdgpu: using MSI.\n");
241 		}
242 	}
243 
244 	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
245 	INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
246 
247 	adev->irq.installed = true;
248 	r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
249 	if (r) {
250 		adev->irq.installed = false;
251 		flush_work(&adev->hotplug_work);
252 		cancel_work_sync(&adev->reset_work);
253 		return r;
254 	}
255 
256 	DRM_INFO("amdgpu: irq initialized.\n");
257 	return 0;
258 }
259 
260 /**
261  * amdgpu_irq_fini - tear down driver interrupt info
262  *
263  * @adev: amdgpu device pointer
264  *
265  * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
266  */
267 void amdgpu_irq_fini(struct amdgpu_device *adev)
268 {
269 	unsigned i, j;
270 
271 	if (adev->irq.installed) {
272 		drm_irq_uninstall(adev->ddev);
273 		adev->irq.installed = false;
274 		if (adev->irq.msi_enabled)
275 			pci_disable_msi(adev->pdev);
276 		flush_work(&adev->hotplug_work);
277 		cancel_work_sync(&adev->reset_work);
278 	}
279 
280 	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
281 		if (!adev->irq.client[i].sources)
282 			continue;
283 
284 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
285 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
286 
287 			if (!src)
288 				continue;
289 
290 			kfree(src->enabled_types);
291 			src->enabled_types = NULL;
292 			if (src->data) {
293 				kfree(src->data);
294 				kfree(src);
295 				adev->irq.client[i].sources[j] = NULL;
296 			}
297 		}
298 		kfree(adev->irq.client[i].sources);
299 	}
300 }
301 
302 /**
303  * amdgpu_irq_add_id - register irq source
304  *
305  * @adev: amdgpu device pointer
306  * @src_id: source id for this source
307  * @source: irq source
308  *
309  */
310 int amdgpu_irq_add_id(struct amdgpu_device *adev,
311 		      unsigned client_id, unsigned src_id,
312 		      struct amdgpu_irq_src *source)
313 {
314 	if (client_id >= AMDGPU_IH_CLIENTID_MAX)
315 		return -EINVAL;
316 
317 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
318 		return -EINVAL;
319 
320 	if (!source->funcs)
321 		return -EINVAL;
322 
323 	if (!adev->irq.client[client_id].sources) {
324 		adev->irq.client[client_id].sources =
325 			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
326 				sizeof(struct amdgpu_irq_src *),
327 				GFP_KERNEL);
328 		if (!adev->irq.client[client_id].sources)
329 			return -ENOMEM;
330 	}
331 
332 	if (adev->irq.client[client_id].sources[src_id] != NULL)
333 		return -EINVAL;
334 
335 	if (source->num_types && !source->enabled_types) {
336 		atomic_t *types;
337 
338 		types = kcalloc(source->num_types, sizeof(atomic_t),
339 				GFP_KERNEL);
340 		if (!types)
341 			return -ENOMEM;
342 
343 		source->enabled_types = types;
344 	}
345 
346 	adev->irq.client[client_id].sources[src_id] = source;
347 	return 0;
348 }
349 
350 /**
351  * amdgpu_irq_dispatch - dispatch irq to IP blocks
352  *
353  * @adev: amdgpu device pointer
354  * @entry: interrupt vector
355  *
356  * Dispatches the irq to the different IP blocks
357  */
358 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
359 			 struct amdgpu_iv_entry *entry)
360 {
361 	unsigned client_id = entry->client_id;
362 	unsigned src_id = entry->src_id;
363 	struct amdgpu_irq_src *src;
364 	int r;
365 
366 	trace_amdgpu_iv(entry);
367 
368 	if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
369 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
370 		return;
371 	}
372 
373 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
374 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
375 		return;
376 	}
377 
378 	if (adev->irq.virq[src_id]) {
379 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
380 	} else {
381 		if (!adev->irq.client[client_id].sources) {
382 			DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
383 				  client_id, src_id);
384 			return;
385 		}
386 
387 		src = adev->irq.client[client_id].sources[src_id];
388 		if (!src) {
389 			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
390 			return;
391 		}
392 
393 		r = src->funcs->process(adev, src, entry);
394 		if (r)
395 			DRM_ERROR("error processing interrupt (%d)\n", r);
396 	}
397 }
398 
399 /**
400  * amdgpu_irq_update - update hw interrupt state
401  *
402  * @adev: amdgpu device pointer
403  * @src: interrupt src you want to enable
404  * @type: type of interrupt you want to update
405  *
406  * Updates the interrupt state for a specific src (all asics).
407  */
408 int amdgpu_irq_update(struct amdgpu_device *adev,
409 			     struct amdgpu_irq_src *src, unsigned type)
410 {
411 	unsigned long irqflags;
412 	enum amdgpu_interrupt_state state;
413 	int r;
414 
415 	spin_lock_irqsave(&adev->irq.lock, irqflags);
416 
417 	/* we need to determine after taking the lock, otherwise
418 	   we might disable just enabled interrupts again */
419 	if (amdgpu_irq_enabled(adev, src, type))
420 		state = AMDGPU_IRQ_STATE_ENABLE;
421 	else
422 		state = AMDGPU_IRQ_STATE_DISABLE;
423 
424 	r = src->funcs->set(adev, src, type, state);
425 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
426 	return r;
427 }
428 
429 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
430 {
431 	int i, j, k;
432 
433 	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
434 		if (!adev->irq.client[i].sources)
435 			continue;
436 
437 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
438 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
439 
440 			if (!src)
441 				continue;
442 			for (k = 0; k < src->num_types; k++)
443 				amdgpu_irq_update(adev, src, k);
444 		}
445 	}
446 }
447 
448 /**
449  * amdgpu_irq_get - enable interrupt
450  *
451  * @adev: amdgpu device pointer
452  * @src: interrupt src you want to enable
453  * @type: type of interrupt you want to enable
454  *
455  * Enables the interrupt type for a specific src (all asics).
456  */
457 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
458 		   unsigned type)
459 {
460 	if (!adev->ddev->irq_enabled)
461 		return -ENOENT;
462 
463 	if (type >= src->num_types)
464 		return -EINVAL;
465 
466 	if (!src->enabled_types || !src->funcs->set)
467 		return -EINVAL;
468 
469 	if (atomic_inc_return(&src->enabled_types[type]) == 1)
470 		return amdgpu_irq_update(adev, src, type);
471 
472 	return 0;
473 }
474 
475 /**
476  * amdgpu_irq_put - disable interrupt
477  *
478  * @adev: amdgpu device pointer
479  * @src: interrupt src you want to disable
480  * @type: type of interrupt you want to disable
481  *
482  * Disables the interrupt type for a specific src (all asics).
483  */
484 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
485 		   unsigned type)
486 {
487 	if (!adev->ddev->irq_enabled)
488 		return -ENOENT;
489 
490 	if (type >= src->num_types)
491 		return -EINVAL;
492 
493 	if (!src->enabled_types || !src->funcs->set)
494 		return -EINVAL;
495 
496 	if (atomic_dec_and_test(&src->enabled_types[type]))
497 		return amdgpu_irq_update(adev, src, type);
498 
499 	return 0;
500 }
501 
502 /**
503  * amdgpu_irq_enabled - test if irq is enabled or not
504  *
505  * @adev: amdgpu device pointer
506  * @idx: interrupt src you want to test
507  *
508  * Tests if the given interrupt source is enabled or not
509  */
510 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
511 			unsigned type)
512 {
513 	if (!adev->ddev->irq_enabled)
514 		return false;
515 
516 	if (type >= src->num_types)
517 		return false;
518 
519 	if (!src->enabled_types || !src->funcs->set)
520 		return false;
521 
522 	return !!atomic_read(&src->enabled_types[type]);
523 }
524 
/* gen irq */
/* irq_chip mask callback for domain-routed GPU irqs.  Intentionally a
 * no-op: per-source masking is not implemented for the IH domain. */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}
530 
/* irq_chip unmask callback for domain-routed GPU irqs.  Intentionally a
 * no-op, mirroring amdgpu_irq_mask(). */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}
535 
/* irq_chip backing the amdgpu IH irq domain; mask/unmask are stubs. */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
541 
542 static int amdgpu_irqdomain_map(struct irq_domain *d,
543 				unsigned int irq, irq_hw_number_t hwirq)
544 {
545 	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
546 		return -EPERM;
547 
548 	irq_set_chip_and_handler(irq,
549 				 &amdgpu_irq_chip, handle_simple_irq);
550 	return 0;
551 }
552 
/* Ops for the linear IH irq domain; only .map is needed. */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
556 
557 /**
558  * amdgpu_irq_add_domain - create a linear irq domain
559  *
560  * @adev: amdgpu device pointer
561  *
562  * Create an irq domain for GPU interrupt sources
563  * that may be driven by another driver (e.g., ACP).
564  */
565 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
566 {
567 	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
568 						 &amdgpu_hw_irqdomain_ops, adev);
569 	if (!adev->irq.domain) {
570 		DRM_ERROR("GPU irq add domain failed\n");
571 		return -ENODEV;
572 	}
573 
574 	return 0;
575 }
576 
577 /**
578  * amdgpu_irq_remove_domain - remove the irq domain
579  *
580  * @adev: amdgpu device pointer
581  *
582  * Remove the irq domain for GPU interrupt sources
583  * that may be driven by another driver (e.g., ACP).
584  */
585 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
586 {
587 	if (adev->irq.domain) {
588 		irq_domain_remove(adev->irq.domain);
589 		adev->irq.domain = NULL;
590 	}
591 }
592 
593 /**
594  * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
595  *                             Linux irq
596  *
597  * @adev: amdgpu device pointer
598  * @src_id: IH source id
599  *
600  * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
601  * Use this for components that generate a GPU interrupt, but are driven
602  * by a different driver (e.g., ACP).
603  * Returns the Linux irq.
604  */
605 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
606 {
607 	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
608 
609 	return adev->irq.virq[src_id];
610 }
611