/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#include "selftest.h"

static struct kmem_cache *slab_fences;

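/*
 * Minimal mock fence used by the tests below: a bare struct dma_fence plus
 * the spinlock protecting its signaling state and callback list.
 */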
static struct mock_fence {
	struct dma_fence base;
	struct spinlock lock;
} *to_mock_fence(struct dma_fence *f) {
	return container_of(f, struct mock_fence, base);
}

static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
	kmem_cache_free(slab_fences, to_mock_fence(f));
}

struct wait_cb {
	struct dma_fence_cb cb;
	struct task_struct *task;
};

static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}

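/*
 * Open-coded equivalent of dma_fence_default_wait(): attach a wakeup
 * callback and sleep until the fence signals, the timeout expires or a
 * signal becomes pending.
 */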
static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct wait_cb cb = { .task = current };

	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
		return timeout;

	while (timeout) {
		set_current_state(state);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (signal_pending_state(state, current))
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (!dma_fence_remove_callback(f, &cb.cb))
		return timeout;

	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	return -ETIME;
}

static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.wait = mock_wait,
	.release = mock_fence_release,
};

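/*
 * Allocate a fence from the SLAB_TYPESAFE_BY_RCU cache and initialise it
 * on context 0 with seqno 0.
 */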
static struct dma_fence *mock_fence(void)
{
	struct mock_fence *f;

	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
	if (!f)
		return NULL;

	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

	return &f->base;
}

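/* Smoke test: a fence can be created, signaled and released. */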
static int sanitycheck(void *arg)
{
	struct dma_fence *f;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);
	dma_fence_put(f);

	return 0;
}

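/*
 * A fence starts out unsignaled, the first dma_fence_signal() succeeds
 * and any further signal reports the fence as already signaled.
 */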
static int test_signaling(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_is_signaled(f)) {
		pr_err("Fence unexpectedly signaled on creation\n");
		goto err_free;
	}

	if (dma_fence_signal(f)) {
		pr_err("Fence reported being already signaled\n");
		goto err_free;
	}

	if (!dma_fence_is_signaled(f)) {
		pr_err("Fence not reporting signaled\n");
		goto err_free;
	}

	if (!dma_fence_signal(f)) {
		pr_err("Fence reported not being already signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

struct simple_cb {
	struct dma_fence_cb cb;
	bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}

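/* A callback added to an unsignaled fence runs when the fence signals. */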
static int test_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

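/*
 * Adding a callback to an already signaled fence must fail, and the
 * rejected callback must never run.
 */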
static int test_late_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);

	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Added callback, but fence was already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback called after failed attachment!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

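/* A callback removed before the fence signals must not run. */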
static int test_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	if (!dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Failed to remove callback!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback still ran after removal!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

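/*
 * Once a callback has run it is no longer on the fence's callback list,
 * so removing it afterwards must fail.
 */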
static int test_late_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	if (dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Callback removal succeeded after being executed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

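/* dma_fence_get_status() reports 0 before signaling and 1 on success. */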
static int test_status(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has signaled status on creation\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!dma_fence_get_status(f)) {
		pr_err("Fence not reporting signaled status\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

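/*
 * An error set with dma_fence_set_error() is only reported through
 * dma_fence_get_status() once the fence has signaled.
 */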
static int test_error(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_set_error(f, -EIO);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has error status before signal\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (dma_fence_get_status(f) != -EIO) {
		pr_err("Fence not reporting error status, got %d\n",
		       dma_fence_get_status(f));
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

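/*
 * The mock wait op returns -ETIME while the fence is unsignaled, even
 * for a zero timeout, and the remaining timeout once it has signaled.
 */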
static int test_wait(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	dma_fence_signal(f);

	if (dma_fence_wait_timeout(f, false, 0) != 0) {
		pr_err("Wait reported incomplete after being signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_signal(f);
	dma_fence_put(f);
	return err;
}

struct wait_timer {
	struct timer_list timer;
	struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
	struct wait_timer *wt = from_timer(wt, timer, timer);

	dma_fence_signal(wt->f);
}

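/*
 * Arm a one-jiffy timer to signal the fence and check that a blocking
 * wait completes once the timer has fired.
 */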
static int test_wait_timeout(void *arg)
{
	struct wait_timer wt;
	int err = -EINVAL;

	timer_setup_on_stack(&wt.timer, wait_timer, 0);

	wt.f = mock_fence();
	if (!wt.f) {
		destroy_timer_on_stack(&wt.timer);
		return -ENOMEM;
	}

	if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	mod_timer(&wt.timer, jiffies + 1);

	if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
		if (timer_pending(&wt.timer)) {
			pr_notice("Timer did not fire within the jiffy!\n");
			err = 0; /* not our fault! */
		} else {
			pr_err("Wait reported incomplete after timeout\n");
		}
		goto err_free;
	}

	err = 0;
err_free:
	del_timer_sync(&wt.timer);
	destroy_timer_on_stack(&wt.timer);
	dma_fence_signal(wt.f);
	dma_fence_put(wt.f);
	return err;
}

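/*
 * The global stub fence must always be reported as signaled, however
 * many references to it are taken.
 */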
static int test_stub(void *arg)
{
	struct dma_fence *f[64];
	int err = -EINVAL;
	int i;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		f[i] = dma_fence_get_stub();
		if (!dma_fence_is_signaled(f[i])) {
			pr_err("Obtained unsignaled stub fence!\n");
			goto err;
		}
	}

	err = 0;
err:
	while (i--)
		dma_fence_put(f[i]);
	return err;
}

/* Now off to the races! */

struct race_thread {
	struct dma_fence __rcu **fences;
	struct task_struct *task;
	bool before;
	int id;
};

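/*
 * Taking and dropping the fence's lock acts as a barrier: any callback
 * running under that lock on another CPU has completed afterwards.
 */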
static void __wait_for_callbacks(struct dma_fence *f)
{
	spin_lock_irq(f->lock);
	spin_unlock_irq(f->lock);
}

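/*
 * Each thread publishes its own fence, grabs its partner's fence under
 * RCU and races dma_fence_add_callback() against dma_fence_signal(),
 * checking that the callback runs whenever attachment succeeded.
 */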
static int thread_signal_callback(void *arg)
{
	const struct race_thread *t = arg;
	unsigned long pass = 0;
	unsigned long miss = 0;
	int err = 0;

	while (!err && !kthread_should_stop()) {
		struct dma_fence *f1, *f2;
		struct simple_cb cb;

		f1 = mock_fence();
		if (!f1) {
			err = -ENOMEM;
			break;
		}

		rcu_assign_pointer(t->fences[t->id], f1);
		smp_wmb();

		rcu_read_lock();
		do {
			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
		} while (!f2 && !kthread_should_stop());
		rcu_read_unlock();

		if (t->before)
			dma_fence_signal(f1);

		smp_store_mb(cb.seen, false);
		if (!f2 ||
		    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
			miss++;
			cb.seen = true;
		}

		if (!t->before)
			dma_fence_signal(f1);

		if (!cb.seen) {
			dma_fence_wait(f2, false);
			__wait_for_callbacks(f2);
		}

		if (!READ_ONCE(cb.seen)) {
			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
			       t->id, pass, miss,
			       t->before ? "before" : "after",
			       dma_fence_is_signaled(f2) ? "yes" : "no");
			err = -EINVAL;
		}

		dma_fence_put(f2);

		rcu_assign_pointer(t->fences[t->id], NULL);
		smp_wmb();

		dma_fence_put(f1);

		pass++;
	}

	pr_info("%s[%d] completed %lu passes, %lu misses\n",
		__func__, t->id, pass, miss);
	return err;
}

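/*
 * Run two racing threads, first signaling after the callback has been
 * attached (pass 0), then signaling before attaching it (pass 1).
 */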
static int race_signal_callback(void *arg)
{
	struct dma_fence __rcu *f[2] = {};
	int ret = 0;
	int pass;

	for (pass = 0; !ret && pass <= 1; pass++) {
		struct race_thread t[2];
		int i;

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			t[i].fences = f;
			t[i].id = i;
			t[i].before = pass;
			t[i].task = kthread_run(thread_signal_callback, &t[i],
						"dma-fence:%d", i);
			get_task_struct(t[i].task);
		}

		msleep(50);

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			int err;

			err = kthread_stop(t[i].task);
			if (err && !ret)
				ret = err;

			put_task_struct(t[i].task);
		}
	}

	return ret;
}

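/* Entry point for the dma-fence selftests. */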
int dma_fence(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_signaling),
		SUBTEST(test_add_callback),
		SUBTEST(test_late_add_callback),
		SUBTEST(test_rm_callback),
		SUBTEST(test_late_rm_callback),
		SUBTEST(test_status),
		SUBTEST(test_error),
		SUBTEST(test_wait),
		SUBTEST(test_wait_timeout),
		SUBTEST(test_stub),
		SUBTEST(race_signal_callback),
	};
	int ret;

	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);

	return ret;
}