// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage the shared and
 * exclusive fences associated with a buffer. A reservation object can
 * have one exclusive fence attached (normally associated with write
 * operations) and/or N shared fences (read operations). RCU is used to
 * protect read access to the fences against locked write-side updates.
 *
 * See struct dma_resv for more details.
 */

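/*
 * Example: a minimal write-side update sketch. It assumes a caller-provided
 * reservation object "obj" and a job fence "fence"; the wrapper itself is
 * hypothetical, only the dma_resv_*() calls are the real entry points
 * documented in this file.
 */
static int example_publish_read_fence(struct dma_resv *obj,
				      struct dma_fence *fence)
{
	int ret;

	/* All modifications happen under the ww_mutex based lock. */
	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	/* Reserve a slot first so that adding the fence cannot fail. */
	ret = dma_resv_reserve_shared(obj, 1);
	if (!ret)
		dma_resv_add_shared_fence(obj, fence);

	dma_resv_unlock(obj);
	return ret;
}
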
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

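/*
 * Example: a reservation object is normally embedded in a driver's buffer
 * object and simply follows its lifetime. The structure and helpers below
 * are hypothetical; only dma_resv_init() and dma_resv_fini() are real.
 */
struct example_bo {
	struct dma_resv resv;
	/* driver specific members would follow here */
};

static void example_bo_setup(struct example_bo *bo)
{
	dma_resv_init(&bo->resv);
}

static void example_bo_teardown(struct example_bo *bo)
{
	/* Only legal once no other thread can reach the object anymore. */
	dma_resv_fini(&bo->resv);
}
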
/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

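/*
 * Example: batching slot reservation. When a job will add several shared
 * fences it is cheaper to reserve all slots up front. The wrapper and its
 * arguments are made up for illustration; note that the reservation only
 * stays valid for as long as @obj remains locked, as documented above.
 */
static int example_add_job_fences(struct dma_resv *obj,
				  struct dma_fence **fences,
				  unsigned int num_fences)
{
	unsigned int i;
	int ret;

	dma_resv_assert_held(obj);

	ret = dma_resv_reserve_shared(obj, num_fences);
	if (ret)
		return ret;

	for (i = 0; i < num_fences; ++i)
		dma_resv_add_shared_fence(obj, fences[i]);

	return 0;
}
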
#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock() and
 * dma_resv_reserve_shared() must have been called beforehand.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

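/*
 * Example: publishing a write (exclusive) fence. As documented above this
 * also drops all shared fences, so no slot reservation is required. "obj"
 * and "fence" are assumed to come from a hypothetical caller.
 */
static int example_publish_write_fence(struct dma_resv *obj,
				       struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	/* The exclusive slot always exists, this cannot fail. */
	dma_resv_add_excl_fence(obj, fence);

	dma_resv_unlock(obj);
	return 0;
}
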
/**
 * dma_resv_iter_restart_unlocked - restart the unlocked iterator
 * @cursor: The dma_resv_iter object to restart
 *
 * Restart the unlocked iteration by initializing the cursor object.
 */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;
	cursor->is_restarted = true;
}

/**
 * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
 * @cursor: cursor to record the current position
 *
 * Return all the fences in the dma_resv object which are not yet signaled.
 * The returned fence has an extra local reference so that it stays alive.
 * If a concurrent modification is detected, the whole iteration is restarted.
 */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->fences->shared_count) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);

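/*
 * Example: lockless iteration with the helpers above. The loop only counts
 * the unsignaled fences, but the begin/for_each/end pattern is the same one
 * used by dma_resv_copy_fences() and dma_resv_get_fences() below. The
 * wrapper name is made up.
 */
static unsigned int example_count_unsignaled(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* A restart means a writer raced us, drop the partial result */
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}
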
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. @dst must be locked with dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.fences) {
				unsigned int cnt = cursor.fences->shared_count;

				list = dma_resv_list_alloc(cnt);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			}
			excl = NULL;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update-side lock held
 * @obj: the reservation object
 * @fence_excl: the returned exclusive fence (or NULL)
 * @shared_count: the number of shared fences returned
 * @shared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified, the exclusive fence is put into the array
 * of shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
			unsigned int *shared_count, struct dma_fence ***shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*shared_count = 0;
	*shared = NULL;

	if (fence_excl)
		*fence_excl = NULL;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*shared_count)
				dma_fence_put((*shared)[--(*shared_count)]);

			if (fence_excl)
				dma_fence_put(*fence_excl);

			count = cursor.fences ? cursor.fences->shared_count : 0;
			count += fence_excl ? 0 : 1;

			/* Eventually re-allocate the array */
			*shared = krealloc_array(*shared, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*shared) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		dma_fence_get(fence);
		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
			*fence_excl = fence;
		else
			(*shared)[(*shared_count)++] = fence;
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);

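/*
 * Example: snapshotting all fences without holding the lock and waiting for
 * them. The wrapper is made up and error handling is kept minimal; note that
 * the caller owns the fence references and the array afterwards.
 */
static int example_wait_snapshot(struct dma_resv *obj)
{
	struct dma_fence **shared, *excl;
	unsigned int i, count;
	int ret;

	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
	if (ret)
		return ret;

	/* The snapshot holds its own references, obj may change freely now. */
	for (i = 0; i < count; ++i) {
		dma_fence_wait(shared[i], false);
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	if (excl) {
		dma_fence_wait(excl, false);
		dma_fence_put(excl);
	}
	return 0;
}
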
/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);

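/*
 * Example: waiting for a buffer to become idle with a timeout. The 100ms
 * value and the wrapper are made up for illustration; only the
 * dma_resv_wait_timeout() call is the real interface.
 */
static int example_wait_idle(struct dma_resv *obj)
{
	long ret;

	/* Wait on all fences, interruptible, for at most 100ms. */
	ret = dma_resv_wait_timeout(obj, true, true, msecs_to_jiffies(100));
	if (ret < 0)
		return ret;		/* interrupted */
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out */
	return 0;			/* all fences signaled */
}
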
/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		/* The iterator only returns unsignaled fences */
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

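/*
 * Example: non-blocking busy check, e.g. for an ioctl reporting whether a
 * buffer is still in use. The wrapper name is hypothetical.
 */
static bool example_bo_is_idle(struct dma_resv *obj)
{
	/* Test all fences, not just the exclusive one. */
	return dma_resv_test_signaled(obj, true);
}
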
#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif