/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

static int
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		/* Leave the fence for the caller to free it after testing */
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_sw_fence *alloc_fence(void)
{
	struct i915_sw_fence *fence;

	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	i915_sw_fence_init(fence, fence_notify);
	return fence;
}

static void free_fence(struct i915_sw_fence *fence)
{
	i915_sw_fence_fini(fence);
	kfree(fence);
}

static int __test_self(struct i915_sw_fence *fence)
{
	if (i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_commit(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	i915_sw_fence_wait(fence);
	if (!i915_sw_fence_done(fence))
		return -EINVAL;

	return 0;
}

static int test_self(void *arg)
{
	struct i915_sw_fence *fence;
	int ret;

	/* Test i915_sw_fence signaling and completion */
	fence = alloc_fence();
	if (!fence)
		return -ENOMEM;

	ret = __test_self(fence);

	free_fence(fence);
	return ret;
}

static int test_dag(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test detection of cycles within the i915_sw_fence graphs */
	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return 0;

	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	if (i915_sw_fence_await_sw_fence_gfp(A, A, GFP_KERNEL) != -EINVAL) {
		pr_err("recursive cycle not detected (AA)\n");
		goto err_A;
	}

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (i915_sw_fence_await_sw_fence_gfp(B, A, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (BAB)\n");
		goto err_B;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	if (i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL) != -EINVAL) {
		pr_err("single depth cycle not detected (CBC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL) != -EINVAL) {
		pr_err("cycle not detected (BA, CB, AC)\n");
		goto err_C;
	}
	if (i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL) == -EINVAL) {
		pr_err("invalid cycle detected\n");
		goto err_C;
	}

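	/* The valid awaits form A -> B -> C; committing all three should complete the chain */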
	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB(void *arg)
{
	struct i915_sw_fence *A, *B;
	int ret;

	/* Test i915_sw_fence (A) waiting on an event source (B) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;
	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_B;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_B;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A))
		goto err_B;

	i915_sw_fence_commit(B);
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B is not done\n");
		goto err_B;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A is not done\n");
		goto err_B;
	}

	ret = 0;
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_ABC(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test a chain of fences, A waits on B who waits on C */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence B was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		pr_err("Incorrectly reported fence C was complete before await\n");
		ret = -EINVAL;
		goto err_C;
	}

	ret = -EINVAL;
	i915_sw_fence_commit(A);
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		goto err_C;
	}

	i915_sw_fence_commit(B);
	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		goto err_C;
	}

	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early (after signaling B)\n");
		goto err_C;
	}

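	/* Only once C is signaled should completion propagate through B to A */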
	i915_sw_fence_commit(C);

	ret = 0;
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}
	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}
err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_AB_C(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret = -EINVAL;

	/* Test multiple fences (AB) waiting on a single event (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(A, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(B, C, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

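	/* Neither A nor B may complete until their shared signaler C does */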
	ret = 0;
	if (i915_sw_fence_done(A)) {
		pr_err("Fence A completed early\n");
		ret = -EINVAL;
	}

	if (i915_sw_fence_done(B)) {
		pr_err("Fence B completed early\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(C);
	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_C_AB(void *arg)
{
	struct i915_sw_fence *A, *B, *C;
	int ret;

	/* Test multiple event sources (A,B) for a single fence (C) */
	A = alloc_fence();
	if (!A)
		return -ENOMEM;

	B = alloc_fence();
	if (!B) {
		ret = -ENOMEM;
		goto err_A;
	}

	C = alloc_fence();
	if (!C) {
		ret = -ENOMEM;
		goto err_B;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, A, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

	ret = i915_sw_fence_await_sw_fence_gfp(C, B, GFP_KERNEL);
	if (ret < 0)
		goto err_C;
	if (ret == 0) {
		ret = -EINVAL;
		goto err_C;
	}

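	/* C waits on both A and B, so it must remain busy until both have signaled */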
	ret = 0;
	i915_sw_fence_commit(C);
	if (i915_sw_fence_done(C))
		ret = -EINVAL;

	i915_sw_fence_commit(A);
	i915_sw_fence_commit(B);

	if (!i915_sw_fence_done(A)) {
		pr_err("Fence A not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(B)) {
		pr_err("Fence B not done\n");
		ret = -EINVAL;
	}

	if (!i915_sw_fence_done(C)) {
		pr_err("Fence C not done\n");
		ret = -EINVAL;
	}

err_C:
	free_fence(C);
err_B:
	free_fence(B);
err_A:
	free_fence(A);
	return ret;
}

static int test_chain(void *arg)
{
	int nfences = 4096;
	struct i915_sw_fence **fences;
	int ret, i;

	/* Test a long chain of fences */
	fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

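	/* Build the chain, with each fence awaiting its predecessor */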
	for (i = 0; i < nfences; i++) {
		fences[i] = alloc_fence();
		if (!fences[i]) {
			nfences = i;
			ret = -ENOMEM;
			goto err;
		}

		if (i > 0) {
			ret = i915_sw_fence_await_sw_fence_gfp(fences[i],
							       fences[i - 1],
							       GFP_KERNEL);
			if (ret < 0) {
				nfences = i + 1;
				goto err;
			}

			i915_sw_fence_commit(fences[i]);
		}
	}

	ret = 0;
	for (i = nfences; --i; ) {
		if (i915_sw_fence_done(fences[i])) {
			if (ret == 0)
				pr_err("Fence[%d] completed early\n", i);
			ret = -EINVAL;
		}
	}
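	/* Completing the head of the chain should cascade through every link */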
	i915_sw_fence_commit(fences[0]);
	for (i = 0; ret == 0 && i < nfences; i++) {
		if (!i915_sw_fence_done(fences[i])) {
			pr_err("Fence[%d] is not done\n", i);
			ret = -EINVAL;
		}
	}

err:
	for (i = 0; i < nfences; i++)
		free_fence(fences[i]);
	kfree(fences);
	return ret;
}

struct task_ipc {
	struct work_struct work;
	struct completion started;
	struct i915_sw_fence *in, *out;
	int value;
};

static void task_ipc(struct work_struct *work)
{
	struct task_ipc *ipc = container_of(work, typeof(*ipc), work);

	complete(&ipc->started);

	i915_sw_fence_wait(ipc->in);
	smp_store_mb(ipc->value, 1);
	i915_sw_fence_commit(ipc->out);
}

static int test_ipc(void *arg)
{
	struct task_ipc ipc;
	struct workqueue_struct *wq;
	int ret = 0;

	wq = alloc_workqueue("i915-selftest", 0, 0);
	if (wq == NULL)
		return -ENOMEM;

	/* Test use of i915_sw_fence as an interprocess signaling mechanism */
	ipc.in = alloc_fence();
	if (!ipc.in) {
		ret = -ENOMEM;
		goto err_work;
	}
	ipc.out = alloc_fence();
	if (!ipc.out) {
		ret = -ENOMEM;
		goto err_in;
	}

	/* use a completion to avoid chicken-and-egg testing */
	init_completion(&ipc.started);

	ipc.value = 0;
	INIT_WORK_ONSTACK(&ipc.work, task_ipc);
	queue_work(wq, &ipc.work);

	wait_for_completion(&ipc.started);

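	/*
	 * Give the worker a chance to run; it must not post the value
	 * until ipc.in has been signaled.
	 */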
	usleep_range(1000, 2000);
	if (READ_ONCE(ipc.value)) {
		pr_err("worker updated value before i915_sw_fence was signaled\n");
		ret = -EINVAL;
	}

	i915_sw_fence_commit(ipc.in);
	i915_sw_fence_wait(ipc.out);

	if (!READ_ONCE(ipc.value)) {
		pr_err("worker signaled i915_sw_fence before value was posted\n");
		ret = -EINVAL;
	}

	flush_work(&ipc.work);
	destroy_work_on_stack(&ipc.work);
	free_fence(ipc.out);
err_in:
	free_fence(ipc.in);
err_work:
	destroy_workqueue(wq);

	return ret;
}

static int test_timer(void *arg)
{
	unsigned long target, delay;
	struct timed_fence tf;

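	/* A timed fence whose expiry is already due should be signaled immediately */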
	preempt_disable();
	timed_fence_init(&tf, target = jiffies);
	if (!i915_sw_fence_done(&tf.fence)) {
		pr_err("Fence with immediate expiration not signaled\n");
		goto err;
	}
	preempt_enable();
	timed_fence_fini(&tf);

	for_each_prime_number(delay, i915_selftest.timeout_jiffies / 2) {
		preempt_disable();
		timed_fence_init(&tf, target = jiffies + delay);
		if (i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
			goto err;
		}
		preempt_enable();

		i915_sw_fence_wait(&tf.fence);

		preempt_disable();
		if (!i915_sw_fence_done(&tf.fence)) {
			pr_err("Fence not signaled after wait\n");
			goto err;
		}
		if (time_before(jiffies, target)) {
			pr_err("Fence signaled too early, target=%lu, now=%lu\n",
			       target, jiffies);
			goto err;
		}
		preempt_enable();
		timed_fence_fini(&tf);
	}

	return 0;

err:
	preempt_enable();
	timed_fence_fini(&tf);
	return -EINVAL;
}

static const char *mock_name(struct dma_fence *fence)
{
	return "mock";
}

static const struct dma_fence_ops mock_fence_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
};

static DEFINE_SPINLOCK(mock_fence_lock);

static struct dma_fence *alloc_dma_fence(void)
{
	struct dma_fence *dma;

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (dma)
		dma_fence_init(dma, &mock_fence_ops, &mock_fence_lock, 0, 0);

	return dma;
}

static struct i915_sw_fence *
wrap_dma_fence(struct dma_fence *dma, unsigned long delay)
{
	struct i915_sw_fence *fence;
	int err;

	fence = alloc_fence();
	if (!fence)
		return ERR_PTR(-ENOMEM);

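	/*
	 * A non-zero delay acts as a timeout, allowing the sw_fence to be
	 * signaled even if the dma-fence itself never is.
	 */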
	err = i915_sw_fence_await_dma_fence(fence, dma, delay, GFP_NOWAIT);
	i915_sw_fence_commit(fence);
	if (err < 0) {
		free_fence(fence);
		return ERR_PTR(err);
	}

	return fence;
}

static int test_dma_fence(void *arg)
{
	struct i915_sw_fence *timeout = NULL, *not = NULL;
	unsigned long delay = i915_selftest.timeout_jiffies;
	unsigned long end, sleep;
	struct dma_fence *dma;
	int err;

	dma = alloc_dma_fence();
	if (!dma)
		return -ENOMEM;

	timeout = wrap_dma_fence(dma, delay);
	if (IS_ERR(timeout)) {
		err = PTR_ERR(timeout);
		goto err;
	}

	not = wrap_dma_fence(dma, 0);
	if (IS_ERR(not)) {
		err = PTR_ERR(not);
		goto err;
	}

	err = -EINVAL;
	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences immediately signaled\n");
		goto err;
	}

	/* We round the timeout for the fence up to the next second */
	end = round_jiffies_up(jiffies + delay);

	sleep = jiffies_to_usecs(delay) / 3;
	usleep_range(sleep, 2 * sleep);
	if (time_after(jiffies, end)) {
		pr_debug("Slept too long, delay=%lu (target=%lu, now=%lu), skipping\n",
			 delay, end, jiffies);
		goto skip;
	}

	if (i915_sw_fence_done(timeout) || i915_sw_fence_done(not)) {
		pr_err("Fences signaled too early\n");
		goto err;
	}

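	/*
	 * The fence armed with a timeout should fire on its own; the one
	 * without must stay unsignaled until the dma-fence is signaled.
	 */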
	if (!wait_event_timeout(timeout->wait,
				i915_sw_fence_done(timeout),
				2 * (end - jiffies) + 1)) {
		pr_err("Timeout fence unsignaled!\n");
		goto err;
	}

	if (i915_sw_fence_done(not)) {
		pr_err("No timeout fence signaled!\n");
		goto err;
	}

skip:
	dma_fence_signal(dma);

	if (!i915_sw_fence_done(timeout) || !i915_sw_fence_done(not)) {
		pr_err("Fences unsignaled\n");
		goto err;
	}

	free_fence(not);
	free_fence(timeout);
	dma_fence_put(dma);

	return 0;

err:
	dma_fence_signal(dma);
	if (!IS_ERR_OR_NULL(timeout))
		free_fence(timeout);
	if (!IS_ERR_OR_NULL(not))
		free_fence(not);
	dma_fence_put(dma);
	return err;
}

int i915_sw_fence_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(test_self),
		SUBTEST(test_dag),
		SUBTEST(test_AB),
		SUBTEST(test_ABC),
		SUBTEST(test_AB_C),
		SUBTEST(test_C_AB),
		SUBTEST(test_chain),
		SUBTEST(test_ipc),
		SUBTEST(test_timer),
		SUBTEST(test_dma_fence),
	};

	return i915_subtests(tests, NULL);
}