/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

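/*
 * Illustrative sketch, not compiled into the driver: the typical
 * emit-then-wait fence lifecycle, using only helpers defined in this
 * file (the caller is assumed to hold the ring emission mutex around
 * the emit, see amdgpu_fence_emit()):
 *
 *	struct amdgpu_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);
 *	if (r)
 *		return r;
 *	...
 *	r = amdgpu_fence_wait(fence, true);
 *	amdgpu_fence_unref(&fence);
 */
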
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_schedule_check - schedule lockup check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Queues a delayed work item to check for lockups.
 */
static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
{
	/*
	 * Do not reset the timer here with mod_delayed_work,
	 * this can livelock in an interaction with TTM delayed destroy.
	 */
	queue_delayed_work(system_power_efficient_wq,
		&ring->fence_drv.lockup_work,
		AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
	if ((*fence) == NULL)
		return -ENOMEM;

	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
		&adev->fence_queue.lock, adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
	return 0;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so the unlocked variants are used
 * for fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
	struct amdgpu_fence *fence;
	struct amdgpu_device *adev;
	u64 seq;
	int ret;

	fence = container_of(wait, struct amdgpu_fence, fence_wake);
	adev = fence->ring->adev;

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

		__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to become an infinite loop there would need
	 * to be a continuous stream of newly signaled fences, i.e.
	 * amdgpu_fence_read needs to return a different value each time
	 * for both the currently polling process and the other process
	 * that xchgs last_seq between the atomic read and xchg of the
	 * current process. And the value the other process sets as last
	 * seq must be higher than the seq value we just read, which means
	 * that the current process needs to be interrupted after
	 * amdgpu_fence_read and before the atomic xchg.
	 *
	 * To be even safer we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last signaled seq but
	 * to an older one.
	 */
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_check(ring);

	return wake;
}

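/*
 * Worked example of the 32->64 bit sequence extension in
 * amdgpu_fence_activity() above (the concrete numbers are only
 * illustrative): with last_seq = 0x00000001fffffffe, a hardware read
 * of 0x00000003 and last_emitted = 0x0000000200000005, "seq" first
 * becomes 0x0000000100000003.  That is < last_seq, so the loop takes
 * the upper 32 bits from last_emitted instead, yielding
 * 0x0000000200000003.  The fence location only holds the low 32 bits;
 * the full 64 bit value is always reconstructed this way.
 */
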
/**
 * amdgpu_fence_check_lockup - check for hardware lockup
 *
 * @work: delayed work item
 *
 * Checks for fence activity and if there is none probe
 * the hardware if a lockup occurred.
 */
static void amdgpu_fence_check_lockup(struct work_struct *work)
{
	struct amdgpu_fence_driver *fence_drv;
	struct amdgpu_ring *ring;

	fence_drv = container_of(work, struct amdgpu_fence_driver,
				lockup_work.work);
	ring = fence_drv->ring;

	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
		/* just reschedule the check if a reset is going on */
		amdgpu_fence_schedule_check(ring);
		return;
	}

	if (amdgpu_fence_activity(ring))
		wake_up_all(&ring->adev->fence_queue);
	else if (amdgpu_ring_is_lockup(ring)) {
		/* good news we believe it's a lockup */
		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
			"0x%016llx last fence id 0x%016llx on ring %d)\n",
			(uint64_t)atomic64_read(&fence_drv->last_seq),
			fence_drv->sync_seq[ring->idx], ring->idx);

		/* remember that we need a reset */
		ring->adev->needs_reset = true;
		wake_up_all(&ring->adev->fence_queue);
	}
	up_read(&ring->adev->exclusive_lock);
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;
	unsigned long irqflags;

	/* See the comment in amdgpu_fence_activity() for why this loop
	 * cannot realistically livelock and why we bail out after 10
	 * iterations anyway.
	 */
	spin_lock_irqsave(&ring->fence_lock, irqflags);
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted)
			break;

		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times; leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (wake) {
		if (amdgpu_enable_scheduler) {
			uint64_t handled_seq =
				amd_sched_get_handled_seq(ring->scheduler);
			uint64_t latest_seq =
				atomic64_read(&ring->fence_drv.last_seq);
			if (handled_seq == latest_seq) {
				DRM_ERROR("ring %d, EOP without seq update (latest_seq=%llu)\n",
					  ring->idx, latest_seq);
				goto exit;
			}
			do {
				amd_sched_isr(ring->scheduler);
			} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
		}

		wake_up_all(&ring->adev->fence_queue);
	}
exit:
	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_signaled().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	if (down_read_trylock(&adev->exclusive_lock)) {
		amdgpu_fence_process(ring);
		up_read(&adev->exclusive_lock);

		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
			return true;
	}
	return false;
}

/**
 * amdgpu_fence_enable_signaling - enable signaling on a fence
 * @fence: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;
	struct amdgpu_device *adev = ring->adev;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
	fence_get(f);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

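/*
 * Sketch, using only the generic fence API (assumed caller, not part
 * of this driver): waiters outside this file reach the callback path
 * above through fence_add_callback(), which arms .enable_signaling on
 * first use.  A non-zero return means the fence had already signaled.
 *
 *	static void my_cb(struct fence *f, struct fence_cb *cb)
 *	{
 *		... woken once amdgpu_fence_check_signaled() signals f ...
 *	}
 *
 *	struct fence_cb cb;
 *	int r = fence_add_callback(&fence->base, &cb, my_cb);
 */
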
/**
 * amdgpu_fence_signaled - check if a fence has signaled
 *
 * @fence: amdgpu fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
{
	if (!fence)
		return true;

	if (amdgpu_fence_seq_signaled(fence->ring, fence->seq)) {
		if (!fence_signal(&fence->base))
			FENCE_TRACE(&fence->base, "signaled from amdgpu_fence_signaled\n");
		return true;
	}

	return false;
}

/**
 * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @adev: amdgpu device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for amdgpu_fence_wait_seq.
 */
static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!adev->rings[i] || !seq[i])
			continue;

		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
			return true;
	}

	return false;
}

/**
 * amdgpu_fence_wait_seq_timeout - wait for specific sequence numbers
 *
 * @adev: amdgpu device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  The sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for amdgpu_fence_wait_*().
 * Returns the remaining time if the sequence number has passed, 0 when
 * the wait timed out, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
					  u64 *target_seq, bool intr,
					  long timeout)
{
	uint64_t last_seq[AMDGPU_MAX_RINGS];
	bool signaled;
	int i;
	long r;

	if (timeout == 0)
		return amdgpu_fence_any_seq_signaled(adev, target_seq);

	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(adev->fence_queue, (
				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(adev->fence_queue, (
				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !target_seq[i])
				continue;

			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {

			if (adev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
				struct amdgpu_ring *ring = adev->rings[i];

				if (!ring || !target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
					break;
			}

			if (i != AMDGPU_MAX_RINGS)
				continue;

			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
				if (!adev->rings[i] || !target_seq[i])
					continue;

				if (amdgpu_ring_is_lockup(adev->rings[i]))
					break;
			}

			if (i < AMDGPU_MAX_RINGS) {
				/* good news we believe it's a lockup */
				dev_warn(adev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				adev->needs_reset = true;
				wake_up_all(&adev->fence_queue);
				return -EDEADLK;
			}

			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
				if (timeout <= 0)
					return 0;
			}
		}
	}
	return timeout;
}

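/*
 * Illustrative call, not taken from the driver: wait up to one second
 * for sequence 42 on ring 0.  A negative result is an error
 * (-EDEADLK on lockup, -ERESTARTSYS on an interrupting signal), zero
 * means the wait timed out, and a positive result is the remaining
 * time in jiffies:
 *
 *	u64 seq[AMDGPU_MAX_RINGS] = {};
 *	long r;
 *
 *	seq[0] = 42;
 *	r = amdgpu_fence_wait_seq_timeout(adev, seq, true, HZ);
 */
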
/**
 * amdgpu_fence_wait - wait for a fence to signal
 *
 * @fence: amdgpu fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, an error for all other cases.
 */
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
{
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[fence->ring->idx] = fence->seq;
	r = amdgpu_fence_wait_seq_timeout(fence->ring->adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = fence_signal(&fence->base);
	if (!r)
		FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
	return 0;
}

/**
 * amdgpu_fence_wait_any - wait for a fence to signal on any ring
 *
 * @adev: amdgpu device pointer
 * @fences: amdgpu fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  The fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, an error for all other cases.
 */
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr)
{
	uint64_t seq[AMDGPU_MAX_RINGS];
	unsigned i, num_rings = 0;
	long r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i])
			continue;

		seq[i] = fences[i]->seq;
		++num_rings;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	return 0;
}

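/*
 * Sketch of a typical amdgpu_fence_wait_any() caller (the fence
 * variables are assumptions for illustration): build a per-ring array
 * where slot i holds a fence from ring i, or NULL, and return as soon
 * as the first one signals.
 *
 *	struct amdgpu_fence *fences[AMDGPU_MAX_RINGS] = {};
 *	int r;
 *
 *	fences[gfx_fence->ring->idx] = gfx_fence;
 *	fences[dma_fence->ring->idx] = dma_fence;
 *	r = amdgpu_fence_wait_any(adev, fences, false);
 */
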
/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait the next fence on
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, an error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
		/* nothing to wait for, last_seq is
		 * already the last emitted fence
		 */
		return -ENOENT;
	}
	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;
	return 0;
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait the fences on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, an error for all other cases.
 * Caller must hold the ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t seq[AMDGPU_MAX_RINGS] = {};
	long r;

	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
	if (!seq[ring->idx])
		return 0;

	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
			ring->idx, r);
	}
	return 0;
}

/**
 * amdgpu_fence_ref - take a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence)
{
	fence_get(&fence->base);
	return fence;
}

/**
 * amdgpu_fence_unref - remove a ref on a fence
 *
 * @fence: amdgpu fence object
 *
 * Remove a reference on a fence (all asics).
 */
void amdgpu_fence_unref(struct amdgpu_fence **fence)
{
	struct amdgpu_fence *tmp = *fence;

	*fence = NULL;
	if (tmp)
		fence_put(&tmp->base);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence
	 * but it's ok to report a slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid a 32 bit wrap around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}

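/*
 * Sketch of the kind of check the power-management code can build on
 * top of amdgpu_fence_count_emitted() (the threshold and the action
 * are hypothetical):
 *
 *	if (amdgpu_fence_count_emitted(ring) > 2) {
 *		... ring is busy, request a higher performance level ...
 *	}
 */
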
/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

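/*
 * Sketch of the inter-ring sync pattern built from the two helpers
 * above (assumed caller, the semaphore emission itself is elided):
 *
 *	if (amdgpu_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		amdgpu_fence_note_sync(fence, dst_ring);
 *	}
 */
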
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
			amdgpu_fence_check_lockup);
	ring->fence_drv.ring = ring;

	if (amdgpu_enable_scheduler) {
		ring->scheduler = amd_sched_create((void *)ring->adev,
						   &amdgpu_sched_ops,
						   ring->idx, 5, 0,
						   amdgpu_sched_hw_submission);
		if (!ring->scheduler)
			DRM_ERROR("Failed to create scheduler on ring %d.\n",
				  ring->idx);
	}
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	init_waitqueue_head(&adev->fence_queue);
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
		wake_up_all(&adev->fence_queue);
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		if (ring->scheduler)
			amd_sched_destroy(ring->scheduler);
		ring->fence_drv.initialized = false;
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
{
	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}

struct amdgpu_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct amdgpu_wait_cb *wait =
		container_of(cb, struct amdgpu_wait_cb, base);
	wake_up_process(wait->task);
}

static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
					     signed long t)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_device *adev = fence->ring->adev;
	struct amdgpu_wait_cb cb;

	cb.task = current;

	if (fence_add_callback(f, &cb.base, amdgpu_fence_wait_cb))
		return t;

	while (t > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		/*
		 * amdgpu_test_signaled must be called after
		 * set_current_state to prevent a race with wake_up_process
		 */
		if (amdgpu_test_signaled(fence))
			break;

		if (adev->needs_reset) {
			t = -EDEADLK;
			break;
		}

		t = schedule_timeout(t);

		if (t > 0 && intr && signal_pending(current))
			t = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);
	fence_remove_callback(f, &cb.base);

	return t;
}

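/*
 * Sketch, assuming only the generic fence API: code that holds a bare
 * struct fence pointer reaches amdgpu_fence_default_wait() above
 * through the ops table below, e.g.:
 *
 *	long r = fence_wait_timeout(f, true, msecs_to_jiffies(100));
 */
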
const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = amdgpu_fence_default_wait,
	.release = NULL,
};