// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences attached (read operations). RCU
 * is used to protect read access to the fences against concurrent,
 * locked write-side updates.
 *
 * See struct dma_resv for more details.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
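
/*
 * Illustrative sketch (not part of the original file): how two reservation
 * objects can be locked together through a ww_acquire_ctx based on
 * reservation_ww_class, backing off onto the slow path when the wound/wait
 * algorithm signals -EDEADLK. The function name is hypothetical.
 */
static int __maybe_unused example_lock_pair(struct dma_resv *a,
					    struct dma_resv *b)
{
	struct ww_acquire_ctx ctx;
	struct dma_resv *tmp;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);

	ret = dma_resv_lock(a, &ctx);
	if (ret)
		goto out_fini;

	while ((ret = dma_resv_lock(b, &ctx)) == -EDEADLK) {
		/*
		 * Contention: drop the lock we hold, take the contended one
		 * on the slow path and retry with the roles swapped.
		 */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, &ctx);
		tmp = a;
		a = b;
		b = tmp;
	}
	if (ret) {
		dma_resv_unlock(a);
		goto out_fini;
	}
	ww_acquire_done(&ctx);

	/* ... both reservation objects are locked here ... */

	dma_resv_unlock(b);
	dma_resv_unlock(a);
out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}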

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must
	 * have been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
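
/*
 * Illustrative sketch (not part of the original file): a dma_resv is
 * typically embedded in a driver's buffer object and lives exactly as long
 * as that object. "struct example_buffer" and both helpers are hypothetical.
 */
struct example_buffer {
	struct dma_resv resv;
	size_t size;
};

static __maybe_unused struct example_buffer *example_buffer_create(size_t size)
{
	struct example_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;

	dma_resv_init(&buf->resv);
	buf->size = size;
	return buf;
}

static void __maybe_unused example_buffer_destroy(struct example_buffer *buf)
{
	/* No users may remain; dma_resv_fini() drops the fence references */
	dma_resv_fini(&buf->resv);
	kfree(buf);
}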

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock() and
 * dma_resv_reserve_shared() must have been called first.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
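
/*
 * Illustrative sketch (not part of the original file): the usual pattern for
 * publishing a new read fence is dma_resv_reserve_shared() followed by
 * dma_resv_add_shared_fence() without dropping the lock in between. The
 * function name is hypothetical.
 */
static int __maybe_unused example_add_read_fence(struct dma_resv *resv,
						 struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock_interruptible(resv, NULL);
	if (ret)
		return ret;

	/* Make sure a free shared slot is available before publishing */
	ret = dma_resv_reserve_shared(resv, 1);
	if (!ret)
		dma_resv_add_shared_fence(resv, fence);

	dma_resv_unlock(resv);
	return ret;
}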

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* Drop the references to the shared fences that were removed above */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
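
/*
 * Illustrative sketch (not part of the original file): attaching an
 * exclusive (write) fence, which replaces all fences previously attached to
 * the object. The function name is hypothetical.
 */
static int __maybe_unused example_add_write_fence(struct dma_resv *resv,
						  struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock_interruptible(resv, NULL);
	if (ret)
		return ret;

	/* No slot reservation is needed for the exclusive fence */
	dma_resv_add_excl_fence(resv, fence);

	dma_resv_unlock(resv);
	return 0;
}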

/* Restart the iterator by initializing all the necessary fields, but not the
 * relation to the dma_resv object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	cursor->shared_count = 0;
	if (cursor->all_fences) {
		cursor->fences = dma_resv_shared_list(cursor->obj);
		if (cursor->fences)
			cursor->shared_count = cursor->fences->shared_count;
	} else {
		cursor->fences = NULL;
	}
	cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->shared_count) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted.  Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
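
/*
 * Illustrative sketch (not part of the original file): counting the fences
 * reported by the unlocked iterator. The walk can restart whenever the
 * object is modified concurrently, so accumulated state must be thrown away
 * when dma_resv_iter_is_restarted() says so. The function name is
 * hypothetical.
 */
static unsigned int __maybe_unused example_count_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* Start over when the fence lists changed under us */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}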

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;

	fence = dma_resv_excl_fence(cursor->obj);
	if (!fence)
		fence = dma_resv_iter_next(cursor);

	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	unsigned int idx;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;
	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
		return NULL;

	idx = cursor->index++;
	return rcu_dereference_protected(cursor->fences->shared[idx],
					 dma_resv_held(cursor->obj));
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
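
/*
 * Illustrative sketch (not part of the original file): walking the fences of
 * an already locked object with dma_resv_for_each_fence(). No restart
 * handling is needed because the fence lists cannot change while the lock is
 * held. The function name is hypothetical.
 */
static bool __maybe_unused example_all_signaled_locked(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, true, fence) {
		if (!dma_fence_is_signaled(fence))
			return false;
	}

	return true;
}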

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from @src to @dst. @dst must be locked via dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.shared_count) {
				list = dma_resv_list_alloc(cursor.shared_count);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			}
			excl = NULL;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
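
/*
 * Illustrative sketch (not part of the original file): carrying the fences
 * of a source object over to a freshly created destination, e.g. when
 * duplicating a buffer. Only @dst needs to be locked, @src is read with the
 * unlocked iterator under RCU. The function name is hypothetical.
 */
static int __maybe_unused example_clone_fences(struct dma_resv *dst,
					       struct dma_resv *src)
{
	int ret;

	ret = dma_resv_lock_interruptible(dst, NULL);
	if (ret)
		return ret;

	ret = dma_resv_copy_fences(dst, src);

	dma_resv_unlock(dst);
	return ret;
}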

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update-side lock held
 * @obj: the reservation object
 * @write: true if we should return all fences
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, bool write,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, write);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.shared_count + 1;

			/* Re-allocate the array to the new size if necessary */
			*fences = krealloc_array(*fences, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*fences) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		(*fences)[(*num_fences)++] = dma_fence_get(fence);
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
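
/*
 * Illustrative sketch (not part of the original file): snapshotting all
 * fences into an array, waiting for each of them and then releasing the
 * references and the array again. The function name is hypothetical.
 */
static int __maybe_unused example_wait_on_snapshot(struct dma_resv *resv)
{
	struct dma_fence **fences;
	unsigned int i, count;
	int ret;

	ret = dma_resv_get_fences(resv, true, &count, &fences);
	if (ret)
		return ret;

	for (i = 0; i < count; ++i) {
		long timeout;

		timeout = dma_fence_wait_timeout(fences[i], false,
						 MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0 && !ret)
			ret = timeout;
	}

	/* Drop the references taken by dma_resv_get_fences() and the array */
	while (count--)
		dma_fence_put(fences[count]);
	kfree(fences);

	return ret;
}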

/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
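
/*
 * Illustrative sketch (not part of the original file): turning the three
 * possible results of dma_resv_wait_timeout() into an errno style return.
 * The function name is hypothetical, @timeout is in jiffies.
 */
static int __maybe_unused example_wait_idle(struct dma_resv *resv,
					    unsigned long timeout)
{
	long ret;

	/* Interruptible wait for the exclusive and all shared fences */
	ret = dma_resv_wait_timeout(resv, true, true, timeout);
	if (ret < 0)
		return ret;		/* e.g. -ERESTARTSYS */
	if (ret == 0)
		return -ETIMEDOUT;	/* the wait timed out */
	return 0;			/* all fences signaled */
}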

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
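
/*
 * Illustrative sketch (not part of the original file): a non-blocking busy
 * check as a poll or busy-ioctl implementation might use it. The function
 * name is hypothetical.
 */
static bool __maybe_unused example_is_busy(struct dma_resv *resv, bool all)
{
	/* True while at least one of the relevant fences has not signaled */
	return !dma_resv_test_signaled(resv, all);
}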

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, true, fence) {
		seq_printf(seq, "\t%s fence:",
			   dma_resv_iter_is_exclusive(&cursor) ?
				"Exclusive" : "Shared");
		dma_fence_describe(fence, seq);
	}
}
EXPORT_SYMBOL_GPL(dma_resv_describe);
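
/*
 * Illustrative sketch (not part of the original file): dumping the fences of
 * a reservation object from a seq_file based debugfs show callback. The lock
 * is taken because dma_resv_describe() uses the locked fence iterator. The
 * function name is hypothetical.
 */
static void __maybe_unused example_show_resv(struct dma_resv *resv,
					     struct seq_file *m)
{
	dma_resv_lock(resv, NULL);
	dma_resv_describe(resv, m);
	dma_resv_unlock(resv);
}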

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif