/**
 * \file drm_lock.c
 * IOCTLs for locking
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
#include "drm_internal.h"

static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);

/**
 * Lock ioctl.
 *
 * \param dev DRM device.
 * \param data ioctl argument, pointing to a struct drm_lock.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 */
int drm_legacy_lock(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	DECLARE_WAITQUEUE(entry, current);
	struct drm_lock *lock = data;
	struct drm_master *master = file_priv->master;
	int ret = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	++file_priv->lock_count;

	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock->context, task_pid_nr(current),
		  master->lock.hw_lock->lock, lock->flags);

	add_wait_queue(&master->lock.lock_queue, &entry);
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters++;
	spin_unlock_bh(&master->lock.spinlock);

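	/*
	 * Open-coded wait loop: sleep on the lock queue until the lock is
	 * acquired, the device goes away, or a signal arrives.
	 */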
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!master->lock.hw_lock) {
			/* Device has been unregistered */
			send_sig(SIGTERM, current, 0);
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&master->lock, lock->context)) {
			master->lock.file_priv = file_priv;
			master->lock.lock_time = jiffies;
			break;	/* Got lock */
		}

		/* Contention */
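		/*
		 * This ioctl runs with drm_global_mutex held; drop it while
		 * sleeping so the current lock holder can issue the unlock
		 * ioctl.
		 */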
		mutex_unlock(&drm_global_mutex);
		schedule();
		mutex_lock(&drm_global_mutex);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters--;
	spin_unlock_bh(&master->lock.spinlock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&master->lock.lock_queue, &entry);

	DRM_DEBUG("%d %s\n", lock->context,
		  ret ? "interrupted" : "has lock");
	if (ret)
		return ret;

	/* Don't block all signals on the master process for now.
	 * This is probably not the correct answer, but it lets us
	 * debug the xkb/X server for now. */
	if (!file_priv->is_master) {
		dev->sigdata.context = lock->context;
		dev->sigdata.lock = master->lock.hw_lock;
	}

	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
		if (dev->driver->dma_quiescent(dev)) {
			DRM_DEBUG("%d waiting for DMA quiescent\n",
				  lock->context);
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * Unlock ioctl.
 *
 * \param dev DRM device.
 * \param data ioctl argument, pointing to a struct drm_lock.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_legacy_unlock(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_lock *lock = data;
	struct drm_master *master = file_priv->master;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	if (drm_legacy_lock_free(&master->lock, lock->context)) {
		/* FIXME: Should really bail out here. */
	}

	return 0;
}

/**
 * Take the heavyweight lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_take(struct drm_lock_data *lock_data,
			 unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD) {
			new = old | _DRM_LOCK_CONT;
		} else {
			new = context | _DRM_LOCK_HELD |
				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
				 _DRM_LOCK_CONT : 0);
		}
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	spin_unlock_bh(&lock_data->spinlock);

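	/*
	 * If the lock was already held by this context before the cmpxchg,
	 * the caller is trying to take a lock it already holds; complain
	 * (except for the kernel context) and fail.
	 */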
	if (_DRM_LOCKING_CONTEXT(old) == context) {
		if (old & _DRM_LOCK_HELD) {
			if (context != DRM_KERNEL_CONTEXT) {
				DRM_ERROR("%d holds heavyweight lock\n",
					  context);
			}
			return 0;
		}
	}

	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
		/* Have lock */
		return 1;
	}
	return 0;
}

/**
 * This takes the lock forcibly and hands it to context.  Should ONLY be used
 * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(struct drm_lock_data *lock_data,
			     unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	lock_data->file_priv = NULL;
	do {
		old = *lock;
		new = context | _DRM_LOCK_HELD;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	return 1;
}

/**
 * Free lock.
 *
 * \param lock_data lock data pointer.
 * \param context locking context.
 *
 * Resets the lock file pointer.
 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
 * waiting on the lock queue.
 */
int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
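	/*
	 * If the kernel idlelock is waiting for the lock, hand the lock
	 * over to the kernel context instead of releasing it.
	 */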
	if (lock_data->kernel_waiters != 0) {
		drm_lock_transfer(lock_data, 0);
		lock_data->idle_has_lock = 1;
		spin_unlock_bh(&lock_data->spinlock);
		return 1;
	}
	spin_unlock_bh(&lock_data->spinlock);

	do {
		old = *lock;
		new = _DRM_LOCKING_CONTEXT(old);
		prev = cmpxchg(lock, old, new);
	} while (prev != old);

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context, _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	wake_up_interruptible(&lock_data->lock_queue);
	return 0;
}

/**
 * This function returns immediately and takes the hw lock with the kernel
 * context if it is free; otherwise the kernel context becomes the
 * highest-priority waiter and gets the lock when and if it is eventually
 * released.
 *
 * This guarantees that the kernel will _eventually_ have the lock _unless_
 * it is held by a blocked process. (In the latter case an explicit wait for
 * the hardware lock would cause a deadlock, which is why the "idlelock" was
 * invented.)
 *
 * This should be sufficient to wait for GPU idle without having to worry
 * about starvation.
 */

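/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a driver wanting the GPU idle while the heavyweight lock may be held by a
 * blocked client would bracket its wait roughly like this, where "master"
 * is the file's drm_master (file_priv->master):
 *
 *	drm_legacy_idlelock_take(&master->lock);
 *	... wait for the engine to go idle ...
 *	drm_legacy_idlelock_release(&master->lock);
 */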
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
	int ret;

	spin_lock_bh(&lock_data->spinlock);
	lock_data->kernel_waiters++;
	if (!lock_data->idle_has_lock) {
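		/*
		 * drm_lock_take() takes the spinlock itself, so drop it
		 * here and re-acquire it afterwards.
		 */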
		spin_unlock_bh(&lock_data->spinlock);
		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
		spin_lock_bh(&lock_data->spinlock);

		if (ret == 1)
			lock_data->idle_has_lock = 1;
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_take);

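/**
 * Release the idlelock taken by drm_legacy_idlelock_take().
 *
 * \param lock_data lock data pointer.
 *
 * Drops the kernel waiter count. When the last kernel waiter goes away and
 * the idlelock actually held the hardware lock, the hardware lock is
 * released and any tasks waiting on the lock queue are woken.
 */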
void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
	unsigned int old, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			do {
				old = *lock;
				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
			} while (prev != old);
			wake_up_interruptible(&lock_data->lock_queue);
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);

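/**
 * Check whether this file currently holds the heavyweight hardware lock.
 *
 * \param dev DRM device.
 * \param file_priv DRM file private.
 * \return non-zero if the hardware lock exists, is held, and was taken by
 * \p file_priv; zero otherwise.
 */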
int drm_legacy_i_have_hw_lock(struct drm_device *dev,
			      struct drm_file *file_priv)
{
	struct drm_master *master = file_priv->master;

	return (file_priv->lock_count && master->lock.hw_lock &&
		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
		master->lock.file_priv == file_priv);
}