/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"

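/**
 * amdgpu_ctx_priority_permit - check if the caller may use a ctx priority
 * @filp: DRM file private the request originates from
 * @priority: requested amd_sched_priority level
 *
 * NORMAL and below are available to every process; higher priorities
 * are restricted to processes with CAP_SYS_NICE or the current DRM
 * master.
 *
 * Returns 0 if the priority is permitted, -EACCES otherwise.
 */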
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum amd_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= AMD_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

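/**
 * amdgpu_ctx_init - initialize a freshly allocated context
 * @adev: amdgpu device the context lives on
 * @priority: initial scheduler priority for the context's entities
 * @filp: DRM file private used for the priority permission check
 * @ctx: context structure to initialize
 *
 * Validates the requested priority, allocates the per-ring fence slots
 * (amdgpu_sched_jobs entries per ring) and creates one scheduler entity
 * per hardware ring. The KIQ ring is skipped since it is driver
 * internal and never accepts user space submissions.
 *
 * Returns 0 on success, negative error code on failure.
 */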
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum amd_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		amd_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

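/**
 * amdgpu_ctx_fini - tear down a context
 * @ctx: context to clean up
 *
 * Drops every fence still stored in the per-ring fence slots, frees the
 * fence array, destroys the scheduler entities and the queue manager.
 * Counterpart of amdgpu_ctx_init().
 */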
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);
}

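/**
 * amdgpu_ctx_alloc - allocate and register a new context
 * @adev: amdgpu device
 * @fpriv: file private that will own the context
 * @filp: DRM file private, forwarded for the priority permission check
 * @priority: initial scheduler priority
 * @id: resulting context handle, returned to user space
 *
 * Allocates an amdgpu_ctx, reserves a handle for it in the file's
 * context IDR and initializes it. On failure the handle is released
 * again and the context is freed.
 *
 * Returns 0 on success, negative error code on failure.
 */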
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum amd_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

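/**
 * amdgpu_ctx_do_release - kref release callback for a context
 * @ref: the embedded refcount that just dropped to zero
 *
 * Called via kref_put() once the last reference is gone; finalizes
 * and frees the context.
 */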
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}

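/**
 * amdgpu_ctx_free - remove a context handle
 * @fpriv: file private the handle belongs to
 * @id: context handle to remove
 *
 * Removes the handle from the IDR and drops the reference it held.
 * The context itself stays alive until the last amdgpu_ctx_put().
 *
 * Returns 0 on success, -EINVAL if the handle was unknown.
 */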
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

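/**
 * amdgpu_ctx_query - report context state to user space
 * @adev: amdgpu device
 * @fpriv: file private the context handle belongs to
 * @id: context handle
 * @out: ioctl result structure to fill in
 *
 * Compares the context's saved reset counter with the device's current
 * one to tell user space whether a GPU reset happened since the last
 * query, then updates the saved counter so the next query starts fresh.
 *
 * Returns 0 on success, -EINVAL if the handle was unknown.
 */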
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

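/**
 * amdgpu_ctx_ioctl - dispatch the AMDGPU_CTX ioctl
 * @dev: DRM device
 * @data: ioctl payload (union drm_amdgpu_ctx)
 * @filp: DRM file private of the caller
 *
 * Entry point for context allocation, destruction and state queries
 * from user space. The requested priority is translated first; invalid
 * values are silently mapped to NORMAL for backwards compatibility.
 *
 * Returns 0 on success, negative error code on failure.
 */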
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum amd_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == AMD_SCHED_PRIORITY_INVALID)
		priority = AMD_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

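/**
 * amdgpu_ctx_get - look up a context and take a reference
 * @fpriv: file private the handle belongs to
 * @id: context handle
 *
 * Returns the context with an extra reference held, or NULL if the
 * handle is unknown. Callers must balance this with amdgpu_ctx_put().
 */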
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

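/**
 * amdgpu_ctx_put - drop a context reference
 * @ctx: context to release
 *
 * Releases a reference taken by amdgpu_ctx_get(); the context is
 * freed once the last reference is dropped.
 *
 * Returns 0 on success, -EINVAL if @ctx is NULL.
 */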
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

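/**
 * amdgpu_ctx_add_fence - remember the fence of a new submission
 * @ctx: context the submission belongs to
 * @ring: hardware ring the job was pushed to
 * @fence: fence of the job
 * @handler: optional output for the sequence number assigned to @fence
 *
 * Stores @fence in the ring's circular fence buffer (amdgpu_sched_jobs
 * slots, indexed by the sequence number masked with amdgpu_sched_jobs -
 * 1) and drops the reference to the fence previously occupying the
 * slot, which must already have signaled at this point.
 *
 * Returns 0.
 */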
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}

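/**
 * amdgpu_ctx_get_fence - look up the fence of a previous submission
 * @ctx: context the submission belonged to
 * @ring: hardware ring it was submitted to
 * @seq: sequence number of the submission, or ~0ull for the most
 *       recent one
 *
 * Returns a reference to the fence if it is still tracked, NULL if the
 * submission is so old that its slot in the circular buffer has been
 * reused, or ERR_PTR(-EINVAL) for a sequence number from the future.
 */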
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

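/**
 * amdgpu_ctx_priority_override - override a context's priority
 * @ctx: context to change
 * @priority: new priority, or AMD_SCHED_PRIORITY_UNSET to revert to
 *            the priority the context was created with
 *
 * Moves every scheduler entity of the context (except the one for the
 * KIQ ring) to the run queue matching the effective priority.
 */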
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum amd_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct amd_sched_rq *rq;
	struct amd_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum amd_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		amd_sched_entity_set_rq(entity, rq);
	}
}

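/**
 * amdgpu_ctx_wait_prev_fence - wait for the fence occupying the next slot
 * @ctx: context the upcoming submission belongs to
 * @ring_id: index of the ring being submitted to
 *
 * Waits on the fence stored in the circular-buffer slot the next
 * submission will reuse, i.e. the fence from amdgpu_sched_jobs
 * submissions ago, which bounds how many jobs a context can have in
 * flight per ring.
 *
 * Returns 0 on success, negative error code if the wait fails.
 */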
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;
		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0) {
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
			return r;
		}
	}

	return 0;
}

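/**
 * amdgpu_ctx_mgr_init - initialize a per-file context manager
 * @mgr: manager embedded in an amdgpu_fpriv
 *
 * Sets up the lock and the IDR that maps context handles to contexts.
 */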
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

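/**
 * amdgpu_ctx_mgr_fini - tear down a per-file context manager
 * @mgr: manager to destroy
 *
 * Drops the manager's reference on every remaining context; any
 * context still referenced elsewhere at this point is reported as
 * still alive.
 */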
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}