/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"

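/*
 * Backing slab for the mock fences. It is created with SLAB_TYPESAFE_BY_RCU
 * in dma_fence() below, which is what makes the RCU fence lookup exercised
 * by race_signal_callback() legal.
 */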
static struct kmem_cache *slab_fences;

static struct mock_fence {
	struct dma_fence base;
	struct spinlock lock;
} *to_mock_fence(struct dma_fence *f) {
	return container_of(f, struct mock_fence, base);
}

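/* Minimal dma_fence_ops callbacks: a fixed name and a release back to the slab. */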
static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
	kmem_cache_free(slab_fences, to_mock_fence(f));
}

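/*
 * A hand-rolled .wait implementation: attach a wake-up callback, then sleep
 * until the fence signals, the timeout expires or a signal is pending.
 */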
struct wait_cb {
	struct dma_fence_cb cb;
	struct task_struct *task;
};

static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}

static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct wait_cb cb = { .task = current };

	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
		return timeout;

	while (timeout) {
		set_current_state(state);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (signal_pending_state(state, current))
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (!dma_fence_remove_callback(f, &cb.cb))
		return timeout;

	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	return -ETIME;
}

static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.wait = mock_wait,
	.release = mock_fence_release,
};

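/* Allocate a mock fence from the slab, on its own lock, context 0, seqno 0. */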
static struct dma_fence *mock_fence(void)
{
	struct mock_fence *f;

	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
	if (!f)
		return NULL;

	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

	return &f->base;
}

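/* Smoke test: create a fence, signal it and drop the reference. */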
static int sanitycheck(void *arg)
{
	struct dma_fence *f;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	dma_fence_signal(f);
	dma_fence_put(f);

	return 0;
}

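/*
 * A new fence must be unsignaled, the first dma_fence_signal() must succeed,
 * and a second signal must report that the fence was already signaled.
 */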
static int test_signaling(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	if (dma_fence_is_signaled(f)) {
		pr_err("Fence unexpectedly signaled on creation\n");
		goto err_free;
	}

	if (dma_fence_signal(f)) {
		pr_err("Fence reported being already signaled\n");
		goto err_free;
	}

	if (!dma_fence_is_signaled(f)) {
		pr_err("Fence not reporting signaled\n");
		goto err_free;
	}

	if (!dma_fence_signal(f)) {
		pr_err("Fence reported not being already signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

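/* A trivial callback that records, with a full barrier, that it has run. */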
struct simple_cb {
	struct dma_fence_cb cb;
	bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}

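/* A callback added to an unsignaled fence must be invoked on signaling. */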
static int test_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

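/* Adding a callback to an already signaled fence must fail and never run it. */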
static int test_late_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	dma_fence_signal(f);

	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Added callback, but fence was already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback called after failed attachment!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

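/* A callback removed before signaling must not be invoked. */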
static int test_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	if (!dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Failed to remove callback!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback still invoked after removal!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

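/* Removing a callback after it has already been executed must fail. */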
static int test_late_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	if (dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Callback removal succeeded after being executed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

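/* dma_fence_get_status() must be zero until the fence is signaled. */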
static int test_status(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has signaled status on creation\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!dma_fence_get_status(f)) {
		pr_err("Fence not reporting signaled status\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

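/* An error set with dma_fence_set_error() is only reported once the fence signals. */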
static int test_error(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	dma_fence_set_error(f, -EIO);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has error status before signal\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (dma_fence_get_status(f) != -EIO) {
		pr_err("Fence not reporting error status, got %d\n",
		       dma_fence_get_status(f));
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

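/*
 * With the mock .wait, a zero-timeout wait must return -ETIME while the
 * fence is unsignaled and the remaining timeout (0) once it has signaled.
 */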
static int test_wait(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(f);

	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	dma_fence_signal(f);

	if (dma_fence_wait_timeout(f, false, 0) != 0) {
		pr_err("Wait reported incomplete after being signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_signal(f);
	dma_fence_put(f);
	return err;
}

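/* Signal the fence from a timer callback so that test_wait_timeout() must block. */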
struct wait_timer {
	struct timer_list timer;
	struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
	struct wait_timer *wt = from_timer(wt, timer, timer);

	dma_fence_signal(wt->f);
}

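/*
 * A bounded wait must return -ETIME while the fence is unsignaled, and must
 * complete once the timer has signaled it.
 */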
static int test_wait_timeout(void *arg)
{
	struct wait_timer wt;
	int err = -EINVAL;

	timer_setup_on_stack(&wt.timer, wait_timer, 0);

	wt.f = mock_fence();
	if (!wt.f)
		return -ENOMEM;

	dma_fence_enable_sw_signaling(wt.f);

	if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	mod_timer(&wt.timer, jiffies + 1);

	if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
		if (timer_pending(&wt.timer)) {
			pr_notice("Timer did not fire within the jiffie!\n");
			err = 0; /* not our fault! */
		} else {
			pr_err("Wait reported incomplete after timeout\n");
		}
		goto err_free;
	}

	err = 0;
err_free:
	del_timer_sync(&wt.timer);
	destroy_timer_on_stack(&wt.timer);
	dma_fence_signal(wt.f);
	dma_fence_put(wt.f);
	return err;
}

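/* The global stub fence must always be returned already signaled. */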
static int test_stub(void *arg)
{
	struct dma_fence *f[64];
	int err = -EINVAL;
	int i;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		f[i] = dma_fence_get_stub();
		if (!dma_fence_is_signaled(f[i])) {
			pr_err("Obtained unsignaled stub fence!\n");
			goto err;
		}
	}

	err = 0;
err:
	while (i--)
		dma_fence_put(f[i]);
	return err;
}

/* Now off to the races! */

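/*
 * Two threads race: each publishes its own fence in an RCU-protected slot,
 * looks up its partner's fence, adds a callback to it, and signals its own
 * fence either before or after the callback is attached.
 */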
struct race_thread {
	struct dma_fence __rcu **fences;
	struct task_struct *task;
	bool before;
	int id;
};

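/*
 * Callbacks are invoked with the fence lock held, so taking and releasing
 * the lock guarantees that any callback already in flight has completed.
 */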
static void __wait_for_callbacks(struct dma_fence *f)
{
	spin_lock_irq(f->lock);
	spin_unlock_irq(f->lock);
}

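/*
 * Worker for race_signal_callback(): publish a fresh fence, grab the partner
 * thread's fence with dma_fence_get_rcu_safe(), attach a callback to it and
 * verify the callback is seen once the partner fence has signaled.
 */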
static int thread_signal_callback(void *arg)
{
	const struct race_thread *t = arg;
	unsigned long pass = 0;
	unsigned long miss = 0;
	int err = 0;

	while (!err && !kthread_should_stop()) {
		struct dma_fence *f1, *f2;
		struct simple_cb cb;

		f1 = mock_fence();
		if (!f1) {
			err = -ENOMEM;
			break;
		}

		dma_fence_enable_sw_signaling(f1);

		rcu_assign_pointer(t->fences[t->id], f1);
		smp_wmb();

		rcu_read_lock();
		do {
			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
		} while (!f2 && !kthread_should_stop());
		rcu_read_unlock();

		if (t->before)
			dma_fence_signal(f1);

		smp_store_mb(cb.seen, false);
		if (!f2 ||
		    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
			miss++;
			cb.seen = true;
		}

		if (!t->before)
			dma_fence_signal(f1);

		if (!cb.seen) {
			dma_fence_wait(f2, false);
			__wait_for_callbacks(f2);
		}

		if (!READ_ONCE(cb.seen)) {
			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
			       t->id, pass, miss,
			       t->before ? "before" : "after",
			       dma_fence_is_signaled(f2) ? "yes" : "no");
			err = -EINVAL;
		}

		dma_fence_put(f2);

		rcu_assign_pointer(t->fences[t->id], NULL);
		smp_wmb();

		dma_fence_put(f1);

		pass++;
	}

	pr_info("%s[%d] completed %lu passes, %lu misses\n",
		__func__, t->id, pass, miss);
	return err;
}

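/*
 * Run the two racing threads twice: pass 0 signals after attaching the
 * callback, pass 1 signals before, covering both orderings.
 */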
static int race_signal_callback(void *arg)
{
	struct dma_fence __rcu *f[2] = {};
	int ret = 0;
	int pass;

	for (pass = 0; !ret && pass <= 1; pass++) {
		struct race_thread t[2];
		int i;

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			t[i].fences = f;
			t[i].id = i;
			t[i].before = pass;
			t[i].task = kthread_run(thread_signal_callback, &t[i],
						"dma-fence:%d", i);
			get_task_struct(t[i].task);
		}

		msleep(50);

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			int err;

			err = kthread_stop(t[i].task);
			if (err && !ret)
				ret = err;

			put_task_struct(t[i].task);
		}
	}

	return ret;
}

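/* Selftest entry point: create the fence slab, run the subtests, tear it down. */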
int dma_fence(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_signaling),
		SUBTEST(test_add_callback),
		SUBTEST(test_late_add_callback),
		SUBTEST(test_rm_callback),
		SUBTEST(test_late_rm_callback),
		SUBTEST(test_status),
		SUBTEST(test_error),
		SUBTEST(test_wait),
		SUBTEST(test_wait_timeout),
		SUBTEST(test_stub),
		SUBTEST(race_signal_callback),
	};
	int ret;

	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);

	return ret;
}