/**
 * \file drm_irq.c
 * IRQ support
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#include <linux/interrupt.h>	/* For task queue support */

/**
 * Get interrupt from bus id.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_irq_busid structure.
 * \return zero on success or a negative number on failure.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will return EINVAL for any busid not equal
 * to that of the device this DRM instance is attached to.
 */
int drm_irq_by_busid(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_irq_busid *p = data;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pdev->bus->number ||
	    p->devnum != PCI_SLOT(dev->pdev->devfn) ||
	    p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;

	p->irq = dev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
		  p->irq);

	return 0;
}
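
/*
 * A minimal userspace sketch of the deprecated DRM_IOCTL_IRQ_BUSID ioctl;
 * drm_fd, domain, bus, slot and func are placeholders and must describe the
 * device this DRM instance is attached to, otherwise the handler above
 * returns -EINVAL.
 *
 *	struct drm_irq_busid busid = {
 *		.busnum  = (domain << 8) | bus,
 *		.devnum  = slot,
 *		.funcnum = func,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_IRQ_BUSID, &busid) == 0)
 *		printf("device uses IRQ %d\n", busid.irq);
 */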

/**
 * Install IRQ handler.
 *
 * \param dev DRM device.
 *
 * Initializes the IRQ-related data and sets up drm_device::vbl_queue.
 * Installs the handler, calling the driver \c drm_driver_irq_preinstall()
 * and \c drm_driver_irq_postinstall() functions before and after the
 * installation.
 */
static int drm_irq_install(struct drm_device *dev)
{
	int ret;
	unsigned long sh_flags = 0;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	if (dev->irq == 0)
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);

	/* Driver must have been initialized */
	if (!dev->dev_private) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	if (dev->irq_enabled) {
		mutex_unlock(&dev->struct_mutex);
		return -EBUSY;
	}
	dev->irq_enabled = 1;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("irq=%d\n", dev->irq);

	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
		init_waitqueue_head(&dev->vbl_queue);

		spin_lock_init(&dev->vbl_lock);

		INIT_LIST_HEAD(&dev->vbl_sigs);
		INIT_LIST_HEAD(&dev->vbl_sigs2);

		dev->vbl_pending = 0;
	}

	/* Before installing handler */
	dev->driver->irq_preinstall(dev);

	/* Install handler */
	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
		sh_flags = IRQF_SHARED;

	ret = request_irq(dev->irq, dev->driver->irq_handler,
			  sh_flags, dev->devname, dev);
	if (ret < 0) {
		mutex_lock(&dev->struct_mutex);
		dev->irq_enabled = 0;
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* After installing handler */
	dev->driver->irq_postinstall(dev);

	return 0;
}
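
/*
 * A minimal sketch of the driver side, assuming a hypothetical "foo" driver:
 * drm_irq_install() only does something useful if the driver advertises
 * DRIVER_HAVE_IRQ and supplies the hooks dereferenced above.
 * foo_irq_handler() is the routine handed to request_irq(); the
 * pre-/postinstall hooks bracket it, typically masking interrupts before
 * and enabling vblank delivery after.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 *				   DRIVER_IRQ_VBL,
 *		.irq_preinstall  = foo_irq_preinstall,
 *		.irq_postinstall = foo_irq_postinstall,
 *		.irq_uninstall   = foo_irq_uninstall,
 *		.irq_handler     = foo_irq_handler,
 *		.vblank_wait     = foo_vblank_wait,
 *	};
 */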

/**
 * Uninstall the IRQ handler.
 *
 * \param dev DRM device.
 *
 * Calls the driver's \c drm_driver_irq_uninstall() function and stops the IRQ.
 */
int drm_irq_uninstall(struct drm_device *dev)
{
	int irq_enabled;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	irq_enabled = dev->irq_enabled;
	dev->irq_enabled = 0;
	mutex_unlock(&dev->struct_mutex);

	if (!irq_enabled)
		return -EINVAL;

	DRM_DEBUG("irq=%d\n", dev->irq);

	dev->driver->irq_uninstall(dev);

	free_irq(dev->irq, dev);

	dev->locked_tasklet_func = NULL;

	return 0;
}

EXPORT_SYMBOL(drm_irq_uninstall);

/**
 * IRQ control ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_control structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls irq_install() or irq_uninstall() according to \p arg.
 */
int drm_control(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_control *ctl = data;

	/* If we don't have an IRQ, we fall back (returning success) for
	 * compatibility reasons - this used to be a separate function in
	 * drm_dma.h.
	 */

	switch (ctl->func) {
	case DRM_INST_HANDLER:
		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
			return 0;
		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
		    ctl->irq != dev->irq)
			return -EINVAL;
		return drm_irq_install(dev);
	case DRM_UNINST_HANDLER:
		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
			return 0;
		return drm_irq_uninstall(dev);
	default:
		return -EINVAL;
	}
}
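
/*
 * A minimal userspace sketch (drm_fd and irq_number are placeholders) asking
 * the kernel to install the interrupt handler; for interface versions >= 1.2
 * the irq field is ignored, as checked above.
 *
 *	struct drm_control ctl = {
 *		.func = DRM_INST_HANDLER,
 *		.irq  = irq_number,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_CONTROL, &ctl) != 0)
 *		perror("DRM_IOCTL_CONTROL");
 */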

/**
 * Wait for VBLANK.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param data user argument, pointing to a drm_wait_vblank structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies the IRQ is installed.
 *
 * If a signal is requested, checks whether this task has already scheduled
 * the same signal for the same vblank sequence number - nothing to be done
 * in that case. If the number of tasks waiting for the interrupt exceeds 100
 * the function fails. Otherwise adds a new entry to drm_device::vbl_sigs for
 * this task.
 *
 * If a signal is not requested, then calls vblank_wait().
 */
int drm_wait_vblank(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	union drm_wait_vblank *vblwait = data;
	struct timeval now;
	int ret = 0;
	unsigned int flags, seq;

	if ((!dev->irq) || (!dev->irq_enabled))
		return -EINVAL;

	if (vblwait->request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
			  vblwait->request.type,
			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
		return -EINVAL;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;

	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
		return -EINVAL;

	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
			  : &dev->vbl_received);
	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fall through */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1<<23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		unsigned long irqflags;
		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
		struct drm_vbl_sig *vbl_sig;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry(vbl_sig, vbl_sigs, head) {
			if (vbl_sig->sequence == vblwait->request.sequence &&
			    vbl_sig->info.si_signo == vblwait->request.signal &&
			    vbl_sig->task == current) {
				spin_unlock_irqrestore(&dev->vbl_lock,
						       irqflags);
				vblwait->reply.sequence = seq;
				goto done;
			}
		}

		if (dev->vbl_pending >= 100) {
			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
			return -EBUSY;
		}

		dev->vbl_pending++;

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
		vbl_sig = drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER);
		if (!vbl_sig)
			return -ENOMEM;

		memset(vbl_sig, 0, sizeof(*vbl_sig));

		vbl_sig->sequence = vblwait->request.sequence;
		vbl_sig->info.si_signo = vblwait->request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		list_add_tail(&vbl_sig->head, vbl_sigs);

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vblwait->reply.sequence = seq;
	} else {
		if (flags & _DRM_VBLANK_SECONDARY) {
			if (dev->driver->vblank_wait2)
				ret = dev->driver->vblank_wait2(dev,
						&vblwait->request.sequence);
		} else if (dev->driver->vblank_wait)
			ret = dev->driver->vblank_wait(dev,
						&vblwait->request.sequence);

		do_gettimeofday(&now);
		vblwait->reply.tval_sec = now.tv_sec;
		vblwait->reply.tval_usec = now.tv_usec;
	}

done:
	return ret;
}
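
/*
 * A minimal userspace sketch (drm_fd is a placeholder) that blocks until the
 * next vblank on the primary pipe; the relative request is converted to an
 * absolute sequence number above before the driver's vblank_wait() hook runs.
 *
 *	union drm_wait_vblank vbl;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
 *		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
 *		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
 */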

/**
 * Send the VBLANK signals.
 *
 * \param dev DRM device.
 *
 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
 */
void drm_vbl_send_signals(struct drm_device *dev)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev->vbl_lock, flags);

	for (i = 0; i < 2; i++) {
		struct drm_vbl_sig *vbl_sig, *tmp;
		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
						   &dev->vbl_received);

		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
				vbl_sig->info.si_code = vbl_seq;
				send_sig_info(vbl_sig->info.si_signo,
					      &vbl_sig->info, vbl_sig->task);

				list_del(&vbl_sig->head);

				drm_free(vbl_sig, sizeof(*vbl_sig),
					 DRM_MEM_DRIVER);

				dev->vbl_pending--;
			}
		}
	}

	spin_unlock_irqrestore(&dev->vbl_lock, flags);
}

EXPORT_SYMBOL(drm_vbl_send_signals);

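/*
 * A minimal sketch of a typical caller, assuming a hypothetical driver
 * interrupt handler with a device-specific status register: it bumps the
 * vblank counter, wakes sleepers on drm_device::vbl_queue and then lets this
 * helper deliver the queued signals.
 *
 *	if (status & FOO_VBLANK_PENDING) {
 *		atomic_inc(&dev->vbl_received);
 *		wake_up_interruptible(&dev->vbl_queue);
 *		drm_vbl_send_signals(dev);
 *	}
 */
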
/**
 * Tasklet wrapper function.
 *
 * \param data DRM device in disguise.
 *
 * Attempts to grab the HW lock and calls the driver callback on success. On
 * failure, leaves the lock marked as contended so the callback can be called
 * from drm_unlock().
 */
static void drm_locked_tasklet_func(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	if (!dev->locked_tasklet_func ||
	    !drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->lock.lock_time = jiffies;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	dev->locked_tasklet_func(dev);

	drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);

	dev->locked_tasklet_func = NULL;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}

/**
 * Schedule a tasklet to call back a driver hook with the HW lock held.
 *
 * \param dev DRM device.
 * \param func Driver callback.
 *
 * This is intended for triggering actions that require the HW lock from an
 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
 * completes. Note that the callback may be called from interrupt or process
 * context; it must not make any assumptions about this. Also, the HW lock will
 * be held with the kernel context or any client context.
 */
void drm_locked_tasklet(struct drm_device *dev,
			void (*func)(struct drm_device *))
{
	unsigned long irqflags;
	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
		return;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	if (dev->locked_tasklet_func) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->locked_tasklet_func = func;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	drm_tasklet.data = (unsigned long)dev;

	tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);
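
/*
 * A minimal sketch of a typical caller, assuming a hypothetical "foo" driver:
 * work that needs the hardware lock is deferred out of the interrupt handler;
 * dev is the struct drm_device passed to request_irq() in drm_irq_install().
 *
 *	static void foo_flip_complete(struct drm_device *dev)
 *	{
 *		... runs with the HW lock held, in tasklet or process context ...
 *	}
 *
 * In the interrupt handler:
 *
 *	if (status & FOO_FLIP_PENDING)
 *		drm_locked_tasklet(dev, foo_flip_complete);
 */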
463