// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have one exclusive fence (normally associated with write operations)
 * or N shared fences (read operations) attached.  The RCU mechanism is used
 * to protect read access to fences from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
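
/*
 * Minimal usage sketch (illustrative only, not taken from any driver): a
 * driver submitting a read operation locks the object, reserves a shared
 * slot, attaches its fence and unlocks again.  "bo" and "fence" are
 * hypothetical driver objects providing a struct dma_resv and a
 * struct dma_fence respectively.
 *
 *	dma_resv_lock(&bo->resv, NULL);
 *	ret = dma_resv_reserve_shared(&bo->resv, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(&bo->resv, fence);
 *	dma_resv_unlock(&bo->resv);
 *
 * while another thread can wait for everything attached so far with
 *
 *	dma_resv_wait_timeout(&bo->resv, true, false, MAX_SCHEDULE_TIMEOUT);
 */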

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
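
/*
 * Lifecycle sketch (illustrative, hypothetical driver structure): a buffer
 * object usually embeds the reservation object and initializes/destroys it
 * together with the buffer itself.
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *		...
 *	};
 *
 *	dma_resv_init(&buf->resv);	at buffer creation
 *	...				use buf->resv under dma_resv_lock()
 *	dma_resv_fini(&buf->resv);	once no references remain
 */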

static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * No need to bump the fence refcounts: RCU readers must use
	 * kref_get_unless_zero() anyway, and the references from the old
	 * struct are carried over to the new one.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
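
/*
 * Reservation sketch (illustrative, hypothetical names): slots must be
 * reserved while holding the lock and before the fences are added; if the
 * lock is dropped in between, the reservation has to be redone.  "resv",
 * "num_jobs" and "job_fence" are hypothetical.
 *
 *	dma_resv_lock(resv, NULL);
 *	ret = dma_resv_reserve_shared(resv, num_jobs);
 *	if (ret) {
 *		dma_resv_unlock(resv);
 *		return ret;
 *	}
 *	for (i = 0; i < num_jobs; ++i)
 *		dma_resv_add_shared_fence(resv, job_fence[i]);
 *	dma_resv_unlock(resv);
 */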

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock() and
 * dma_resv_reserve_shared() must have been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	/* Drivers should not add containers here, instead add each fence
	 * individually.
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example of using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement)
{
	struct dma_resv_list *list;
	struct dma_fence *old;
	unsigned int i;

	dma_resv_assert_held(obj);

	write_seqcount_begin(&obj->seq);

	old = dma_resv_excl_fence(obj);
	if (old->context == context) {
		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
		dma_fence_put(old);
	}

	list = dma_resv_shared_list(obj);
	for (i = 0; list && i < list->shared_count; ++i) {
		old = rcu_dereference_protected(list->shared[i],
						dma_resv_held(obj));
		if (old->context != context)
			continue;

		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
		dma_fence_put(old);
	}

	write_seqcount_end(&obj->seq);
}
EXPORT_SYMBOL(dma_resv_replace_fences);
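
/*
 * Sketch (illustrative, hypothetical fences): a driver that preempted work
 * and made the resource inaccessible through a page table update could
 * replace all fences of the preempted context in one go.  "preempt_ctx" and
 * "pt_update_fence" are hypothetical names.
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_replace_fences(resv, preempt_ctx, pt_update_fence);
 *	dma_resv_unlock(resv);
 */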

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
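
/*
 * Sketch (illustrative, hypothetical names): a write operation attaches an
 * exclusive fence, which also drops all shared fences currently attached.
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_add_excl_fence(resv, write_fence);
 *	dma_resv_unlock(resv);
 */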

/* Restart the iterator by initializing all the necessary fields, but not the
 * relation to the dma_resv object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	cursor->shared_count = 0;
	if (cursor->all_fences) {
		cursor->fences = dma_resv_shared_list(cursor->obj);
		if (cursor->fences)
			cursor->shared_count = cursor->fences->shared_count;
	} else {
		cursor->fences = NULL;
	}
	cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->shared_count) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
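
/*
 * Unlocked iteration sketch (illustrative): because the walk can restart
 * when the object is concurrently modified, accumulated state must be
 * thrown away whenever dma_resv_iter_is_restarted() is true.  "count" is a
 * hypothetical statistic.
 *
 *	dma_resv_iter_begin(&cursor, obj, true);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			count = 0;
 *		++count;
 *	}
 *	dma_resv_iter_end(&cursor);
 */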

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;

	fence = dma_resv_excl_fence(cursor->obj);
	if (!fence)
		fence = dma_resv_iter_next(cursor);

	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	unsigned int idx;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;
	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
		return NULL;

	idx = cursor->index++;
	return rcu_dereference_protected(cursor->fences->shared[idx],
					 dma_resv_held(cursor->obj));
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
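
/*
 * Locked iteration sketch (illustrative): with the &dma_resv.lock held the
 * fence set cannot change, so no restart handling is needed.  "busy" is a
 * hypothetical counter.
 *
 *	dma_resv_lock(obj, NULL);
 *	dma_resv_for_each_fence(&cursor, obj, true, fence)
 *		if (!dma_fence_is_signaled(fence))
 *			++busy;
 *	dma_resv_unlock(obj);
 */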

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. @dst must be locked with dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.shared_count) {
				list = dma_resv_list_alloc(cursor.shared_count);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			}
			excl = NULL;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update side lock held
 * @obj: the reservation object
 * @write: true if we should return all fences
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, bool write,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, write);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.shared_count + 1;

			/* Re-allocate the array if needed */
			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*fences) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
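
/*
 * Sketch (illustrative): take a snapshot of all fences without holding the
 * lock and release it again; the returned array must be freed by the caller.
 *
 *	struct dma_fence **fences;
 *	unsigned int i, count;
 *	int r;
 *
 *	r = dma_resv_get_fences(obj, true, &count, &fences);
 *	if (!r) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(fences[i]);
 *		kfree(fences);
 *	}
 */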

/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
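
/*
 * Sketch (illustrative): interruptible wait for all fences with a 100 ms
 * timeout, following the jiffies-based return convention.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(obj, true, true, msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *
 * A return value greater than zero is the remaining timeout in jiffies.
 */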

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
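
/*
 * Sketch (illustrative): a non-blocking busy check, e.g. for a nonblocking
 * submission path.
 *
 *	if (!dma_resv_test_signaled(obj, true))
 *		return -EBUSY;
 */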

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, true, fence) {
		seq_printf(seq, "\t%s fence:",
			   dma_resv_iter_is_exclusive(&cursor) ?
				"Exclusive" : "Shared");
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif