xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c (revision 781095f903f398148cd0b646d3984234a715f29e)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

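/*
 * Illustrative sketch (not part of the driver): the CPU-side test that the
 * code below implements.  A fence is signaled once the sequence number the
 * GPU wrote back has caught up with the fence's own number (fence_seq is a
 * stand-in for fence->seq).  The comparison here ignores 32-bit wrap-around,
 * which amdgpu_fence_activity() handles further down:
 *
 *	u32 hw_seq = amdgpu_fence_read(ring);	// value written back by the GPU
 *	if (hw_seq >= fence_seq)
 *		; // buffers tied to fence_seq are idle, caches flushed
 */
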
static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: pointer used to return the resulting amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->seq = ++ring->fence_drv.sync_seq;
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		&ring->fence_drv.fence_queue.lock,
		adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	return 0;
}

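/*
 * Usage sketch (hypothetical caller, not code from this file): emitting a
 * fence and later waiting on it through the common fence API.  "ring" and
 * "owner" are assumed to come from the caller's context.
 *
 *	struct amdgpu_fence *fence;
 *	int r = amdgpu_fence_emit(ring, owner, &fence);
 *	if (r)
 *		return r;	// -ENOMEM, nothing was emitted
 *	// ... submit the commands covered by the fence ...
 *	fence_wait(&fence->base, false);
 *	fence_put(&fence->base);
 */
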
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop, new fences need
	 * to be signaled continuously, i.e. amdgpu_fence_read needs to
	 * return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process must be interrupted after amdgpu_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we might
	 * have temporarily set last_seq not to the true last signaled
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq;
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * understanding that we might have set an older
			 * fence seq than the real last seq signaled by
			 * the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_fallback(ring);

	return wake;
}

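/*
 * Worked example of the 32-bit extension above (illustrative values): with
 * last_seq == 0x00000001fffffffe and a hardware readback of 0x00000003,
 * seq first becomes 0x0000000100000003, which is below last_seq, so the
 * upper 32 bits are taken from last_emitted instead.  With
 * last_emitted == 0x0000000200000005 the result is 0x0000000200000003,
 * i.e. the counter wrapped once past 32 bits.
 */
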
/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->fence_drv.fence_queue);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, cast to an unsigned long (timer callback data)
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_ring_wait_seq().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

/*
 * amdgpu_fence_ring_wait_seq - wait for a specific sequence number on a ring
 * @ring: ring to wait on for the seq number
 * @seq: sequence number to wait for
 *
 * return value:
 * 0: seq signaled, and gpu not hung
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	bool signaled = false;

	BUG_ON(!ring);
	if (seq > ring->fence_drv.sync_seq)
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	amdgpu_fence_schedule_fallback(ring);
	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));

	if (signaled)
		return 0;
	else
		return -EDEADLK;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq)
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq;

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last
	 * sequence, but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

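/*
 * Example of the arithmetic above (illustrative numbers): with
 * sync_seq == 110 and last_seq == 100, ten fences have been emitted but
 * not yet signaled, so this returns 10.  The clamp to 0x10000000 caps the
 * bogus huge difference that a stale or racy last_seq read could
 * otherwise produce.
 */
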
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d uses gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

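/*
 * Bring-up sketch (hypothetical per-ring init code, not from this file):
 * the fence driver is first initialized and then started once the ring's
 * interrupt source is known.  irq_src and irq_type are placeholders.
 *
 *	r = amdgpu_fence_driver_init_ring(ring);
 *	if (r)
 *		return r;
 *	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
 */
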
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	long timeout;
	int r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

	init_waitqueue_head(&ring->fence_drv.fence_queue);

	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
	if (timeout == 0) {
		/*
		 * FIXME:
		 * The delayed workqueue cannot use MAX_SCHEDULE_TIMEOUT
		 * directly, so the scheduler will not use a delayed
		 * workqueue if MAX_SCHEDULE_TIMEOUT is set.
		 * Currently keep it simple and silly.
		 */
		timeout = MAX_SCHEDULE_TIMEOUT;
	}
	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
			   amdgpu_sched_hw_submission,
			   timeout, ring->name);
	if (r) {
		DRM_ERROR("Failed to create scheduler on ring %s.\n",
			  ring->name);
		return r;
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success, -ENOMEM if the fence slab cannot be created.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&ring->fence_drv.fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		ring->fence_drv.initialized = false;
	}
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay the GPU reset until resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	}
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Check whether the fence's sequence number has already signaled.  If it
 * hasn't, start fence processing.  Returns true if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_enable_signaling - enable signaling on fence
 * @fence: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
	fence_get(f);
	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

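/*
 * Flow sketch (illustrative): once a waiter attaches, either the fence
 * interrupt or the fallback timer drives completion:
 *
 *	irq handler / fallback_timer
 *	  -> amdgpu_fence_process()
 *	    -> wake_up_all(&ring->fence_drv.fence_queue)
 *	      -> amdgpu_fence_check_signaled()	// signals fence, detaches
 */
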
static void amdgpu_fence_release(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   ring->fence_drv.sync_seq);
	}
	return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu reset\n");
	amdgpu_gpu_reset(adev);

	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}