/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

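/**
 * amdgpu_ctx_total_num_entities - total scheduler entities per context
 *
 * Sums amdgpu_ctx_num_entities[] over all hardware IP types, i.e. the
 * number of amdgpu_ctx_entity slots that every context allocates.
 */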
static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

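/**
 * amdgpu_ctx_priority_permit - check if a context priority is allowed
 * @filp: DRM file private the request comes from
 * @priority: requested scheduler priority
 *
 * Priorities above NORMAL are restricted to processes with CAP_SYS_NICE
 * or to the current DRM master. Returns 0 if permitted, -EACCES otherwise.
 */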
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

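/**
 * amdgpu_ctx_init - initialize a freshly allocated context
 * @adev: amdgpu device
 * @priority: initial scheduler priority for all entities
 * @filp: DRM file private, used for the priority permission check
 * @ctx: context object to initialize
 *
 * Allocates the per-entity fence ring buffers, collects the hardware rings
 * backing each IP type and initializes one scheduler entity per exposed
 * ring slot. Returns 0 on success, a negative error code on failure.
 */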
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j, k;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		unsigned num_rings = 0;
		unsigned num_rqs = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
				if (adev->vcn.harvest_config & (1 << j))
					continue;
				rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
			}
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
				if (adev->vcn.harvest_config & (1 << j))
					continue;
				for (k = 0; k < adev->vcn.num_enc_rings; ++k)
					rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
			}
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
				if (adev->vcn.harvest_config & (1 << j))
					continue;
				rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
			}
			break;
		}

		for (j = 0; j < num_rings; ++j) {
			if (!rings[j]->adev)
				continue;

			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rqs, &ctx->guilty);
			if (r)
				goto error_cleanup_entities;
		}
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

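/**
 * amdgpu_ctx_fini - free a context object
 * @ref: refcount embedded in the context
 *
 * Final kref release callback: drops all fences still cached in the
 * per-entity ring buffers and frees the fence and entity arrays together
 * with the context itself.
 */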
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

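/**
 * amdgpu_ctx_get_entity - look up the scheduler entity for a ring
 * @ctx: context to look in
 * @hw_ip: hardware IP type (AMDGPU_HW_IP_*)
 * @instance: IP instance, currently must be 0
 * @ring: ring index within the IP type
 * @entity: returned scheduler entity
 *
 * Validates the user supplied hw_ip/instance/ring triple and returns the
 * matching entity, or -EINVAL for out of range values.
 */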
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

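/**
 * amdgpu_ctx_alloc - create a context and publish it in the handle IDR
 *
 * Allocates a context, reserves a handle for it (starting at 1, so that 0
 * remains an invalid handle) and initializes it. On success 0 is returned
 * and the new handle is stored in @id.
 */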
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

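/**
 * amdgpu_ctx_do_release - kref release callback for a used context
 * @ref: refcount embedded in the context
 *
 * Destroys all scheduler entities before handing the context over to
 * amdgpu_ctx_fini() for the actual freeing.
 */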
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();
	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

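/**
 * amdgpu_ctx_free - remove a context handle and drop its reference
 * @fpriv: per-file private holding the handle IDR
 * @id: context handle to free
 *
 * Returns 0 if the handle existed, -EINVAL otherwise.
 */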
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

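/**
 * amdgpu_ctx_query - legacy context state query
 *
 * Reports whether a GPU reset occurred since the last query of this
 * context by comparing against the device wide reset counter.
 */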
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

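/**
 * amdgpu_ctx_query2 - extended context state query
 *
 * Reports reset, VRAM lost, guilty and RAS error state as
 * AMDGPU_CTX_QUERY2_FLAGS_* bits in @out.
 */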
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the count of uncorrectable (UE) RAS errors */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counters are monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the count of correctable (CE) RAS errors */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

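/**
 * amdgpu_ctx_ioctl - handler for the AMDGPU_CTX ioctl
 *
 * Dispatches context allocation, freeing and the two state query
 * operations based on args->in.op.
 */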
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field.
	 */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

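/**
 * amdgpu_ctx_get - look up a context by handle and take a reference
 *
 * Returns the context with its refcount elevated, or NULL if the handle
 * is unknown. The caller must balance this with amdgpu_ctx_put().
 */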
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

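/**
 * amdgpu_ctx_put - drop a context reference taken with amdgpu_ctx_get()
 */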
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

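/**
 * amdgpu_ctx_add_fence - remember a fence in the entity's fence ring
 * @ctx: context the entity belongs to
 * @entity: scheduler entity the job was pushed to
 * @fence: fence of the submitted job
 * @handle: optional pointer returning the sequence number for later lookup
 *
 * Stores @fence in the fixed size per-entity ring buffer, dropping the
 * reference to the (already signaled) fence that occupied the slot before.
 */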
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

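/**
 * amdgpu_ctx_get_fence - look up a fence by sequence number
 *
 * Returns a reference to the fence for @seq, NULL if the fence has
 * already left the ring buffer (and is therefore known to be signaled),
 * or ERR_PTR(-EINVAL) for sequence numbers that have not been emitted yet.
 */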
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

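/**
 * amdgpu_ctx_priority_override - override the priority of all entities
 *
 * Applies @priority to every entity of the context; when the override is
 * DRM_SCHED_PRIORITY_UNSET the initial priority is restored instead.
 */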
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

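/**
 * amdgpu_ctx_wait_prev_fence - wait for the oldest fence in the ring buffer
 *
 * Interruptibly waits for the fence the next submission would overwrite,
 * which throttles each entity to at most amdgpu_sched_jobs jobs in flight.
 */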
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

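/**
 * amdgpu_ctx_mgr_init - initialize a per-file context manager
 */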
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

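/**
 * amdgpu_ctx_mgr_entity_flush - flush all entities of all contexts
 *
 * Called when the file descriptor is closed; waits up to @timeout for the
 * outstanding jobs of every entity and returns the remaining timeout.
 */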
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

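/**
 * amdgpu_ctx_mgr_entity_fini - tear down the entities of all contexts
 *
 * Contexts that still hold additional references are reported and
 * skipped, since their entities may still be in use.
 */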
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

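/**
 * amdgpu_ctx_mgr_fini - final teardown of a context manager
 *
 * Drops the last reference of every remaining context, reports contexts
 * that are leaked, and destroys the handle IDR and the lock.
 */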
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}