// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 *
 * See struct dma_resv for more details.
 */
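
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * before reading from a buffer it is usually enough to wait for the
 * exclusive (write) fence, while a writer has to wait for the shared
 * (read) fences as well:
 *
 *	wait for writers only:
 *	lret = dma_resv_wait_timeout(resv, false, true, timeout);
 *
 *	wait for readers and writers:
 *	lret = dma_resv_wait_timeout(resv, true, true, timeout);
 *
 * "resv" and "timeout" stand in for a driver's reservation object and a
 * timeout in jiffies; fences are added with dma_resv_add_shared_fence()
 * and dma_resv_add_excl_fence() while holding dma_resv_lock().
 */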

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

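	/*
	 * kmalloc() may have rounded the allocation up to the next slab
	 * bucket; recompute shared_max from ksize() so that any slack
	 * space is usable for additional fence slots.
	 */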
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection here.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
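
/*
 * Illustrative lifecycle (hypothetical driver code, not part of this file):
 * a reservation object is usually embedded in a driver's buffer object and
 * lives exactly as long as that object does:
 *
 *	struct my_bo {
 *		struct dma_resv resv;
 *	};
 *
 *	dma_resv_init(&bo->resv);	on buffer creation
 *	dma_resv_fini(&bo->resv);	on final buffer release
 *
 * "struct my_bo" and "bo" are made-up names for illustration only.
 */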

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
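
/*
 * Illustrative call sequence (hypothetical driver code, not part of this
 * file): slots are reserved and fences added while the reservation lock is
 * held, and a reservation does not survive an unlock:
 *
 *	ret = dma_resv_lock(resv, NULL);
 *	if (ret)
 *		return ret;
 *	ret = dma_resv_reserve_shared(resv, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(resv, fence);
 *	dma_resv_unlock(resv);
 *	return ret;
 *
 * "resv" and "fence" stand in for the driver's reservation object and the
 * dma_fence of the work being published.
 */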

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() must have been called beforehand.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj; see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
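
/*
 * Illustrative usage (hypothetical driver code, not part of this file): a
 * write job typically publishes its fence as the new exclusive fence, which
 * also drops all previously attached shared fences:
 *
 *	ret = dma_resv_lock(resv, NULL);
 *	if (!ret) {
 *		dma_resv_add_excl_fence(resv, write_fence);
 *		dma_resv_unlock(resv);
 *	}
 *
 * "resv" and "write_fence" stand in for the driver's reservation object and
 * the dma_fence of the write operation.
 */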

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. @dst must be locked with dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = dma_resv_shared_list(src);

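	/*
	 * The shared list of @src is read under RCU without holding its lock,
	 * so the allocation below can race with new fences being added to
	 * @src. If that happens the snapshot is discarded and the copy is
	 * restarted from here.
	 */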
retry:
	if (src_list) {
		unsigned int shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = dma_resv_shared_list(src);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence __rcu **dst;
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = dma_resv_shared_list(src);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			dst = &dst_list->shared[dst_list->shared_count++];
			rcu_assign_pointer(*dst, fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_shared_list(dst);
	old = dma_resv_excl_fence(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified, the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = dma_resv_excl_fence(obj);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = dma_resv_shared_list(obj);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

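			/*
			 * Try a nonblocking allocation first so the RCU read
			 * side is not left while the fences are sampled; if
			 * that fails, drop everything and retry the whole
			 * sequence with a sleeping GFP_KERNEL allocation.
			 */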
			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
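
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * the returned references and the array belong to the caller and must be
 * released explicitly:
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(resv, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);
 *
 * "resv" stands in for the reservation object being inspected; a real user
 * would of course look at the fences before dropping the references.
 */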

/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	unsigned int seq, shared_count;
	struct dma_fence *fence;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = dma_resv_excl_fence(obj);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence;

			lfence = rcu_dereference(fobj->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
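
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * a common pattern is to convert the jiffies-based return value into an
 * errno, e.g. before allowing CPU access to a buffer:
 *
 *	long lret;
 *
 *	lret = dma_resv_wait_timeout(resv, true, true,
 *				     msecs_to_jiffies(100));
 *	if (lret < 0)
 *		return lret;
 *	if (lret == 0)
 *		return -ETIME;
 *
 * A negative value is an error (e.g. -ERESTARTSYS when interrupted), zero
 * means the wait timed out, and a positive value means all fences signaled
 * within the timeout. "resv" stands in for the buffer's reservation object.
 */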

/*
 * Helper for dma_resv_test_signaled(): returns 1 if @passed_fence has
 * signaled, 0 if it has not, and -1 if the fence is already being freed,
 * in which case the caller must re-read the fence pointer and retry.
 */
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_fence *fence;
	unsigned int seq;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
		unsigned int i, shared_count;

		shared_count = fobj ? fobj->shared_count : 0;
		for (i = 0; i < shared_count; ++i) {
			fence = rcu_dereference(fobj->shared[i]);
			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}
	}

	fence = dma_resv_excl_fence(obj);
	if (ret && fence) {
		ret = dma_resv_test_signaled_single(fence);
		if (ret < 0)
			goto retry;

	}

	if (read_seqcount_retry(&obj->seq, seq))
		goto retry;

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
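
/*
 * Illustrative usage (hypothetical driver code, not part of this file):
 * a non-blocking idle check, e.g. when deciding whether a buffer can be
 * reclaimed immediately:
 *
 *	if (dma_resv_test_signaled(resv, true))
 *		free_buffer_now();
 *	else
 *		defer_freeing();
 *
 * "resv", "free_buffer_now" and "defer_freeing" are made-up names; passing
 * true checks the shared fences as well as the exclusive one.
 */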

#if IS_ENABLED(CONFIG_LOCKDEP)
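/*
 * Prime lockdep with the lock ordering dma_resv users are expected to
 * follow (mmap_lock, then the reservation lock, then memory reclaim and
 * the MMU notifier annotations around fence waits), so that violations
 * are reported even on systems and drivers that never hit these paths
 * at runtime.
 */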
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif