xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c (revision fb71a336cdc2ec45507b37c0690130a5e39f9733)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

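/*
 * Sum of the per-IP entity counts above, i.e. the total number of scheduler
 * entities carried by each context.
 */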
static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

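/*
 * Check whether the caller may create a context with the requested scheduler
 * priority: NORMAL and below are open to everyone, higher priorities require
 * CAP_SYS_NICE or DRM master status.
 */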
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

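/*
 * Initialize a context: allocate the fence ring buffers (amdgpu_sched_jobs
 * slots per entity) and the entity array, wire up the per-IP entity
 * pointers and initialize one drm_sched_entity per entity against the
 * schedulers of the matching IP block.
 */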
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct drm_gpu_scheduler **scheds;
		struct drm_gpu_scheduler *sched;
		unsigned num_scheds = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			scheds = adev->gfx.gfx_sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			scheds = adev->gfx.compute_sched;
			num_scheds = adev->gfx.num_compute_sched;
			break;
		case AMDGPU_HW_IP_DMA:
			scheds = adev->sdma.sdma_sched;
			num_scheds = adev->sdma.num_sdma_sched;
			break;
		case AMDGPU_HW_IP_UVD:
			sched = &adev->uvd.inst[0].ring.sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			sched = &adev->vce.ring[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			sched = &adev->uvd.inst[0].ring_enc[0].sched;
			scheds = &sched;
			num_scheds = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			scheds = adev->vcn.vcn_dec_sched;
			num_scheds = adev->vcn.num_vcn_dec_sched;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			scheds = adev->vcn.vcn_enc_sched;
			num_scheds = adev->vcn.num_vcn_enc_sched;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			scheds = adev->jpeg.jpeg_sched;
			num_scheds = adev->jpeg.num_jpeg_sched;
			break;
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  priority, scheds,
						  num_scheds, &ctx->guilty);
		if (r)
			goto error_cleanup_entities;
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

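/*
 * Final kref release callback: drop the fences still referenced by the
 * per-entity rings, then free the fence array, the entity array and the
 * context itself.  The scheduler entities are expected to have been torn
 * down by the callers before this runs.
 */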
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

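/*
 * Map a (hw_ip, instance, ring) triple from userspace to the corresponding
 * scheduler entity of the context, validating the indices along the way.
 */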
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

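/*
 * Allocate a new context, publish it in the file-private IDR to obtain a
 * handle, and initialize it.  On initialization failure the handle is
 * removed again before the error is returned.
 */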
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

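/*
 * Normal kref release path: destroy all scheduler entities of the context
 * and then finalize it via amdgpu_ctx_fini().
 */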
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();
	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

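/* Remove the handle from the IDR and drop the reference it was holding. */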
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

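/*
 * Legacy state query: reports whether a GPU reset happened since the last
 * query by comparing the per-context snapshot of the reset counter with the
 * current global value.
 */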
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

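/*
 * Extended state query: reports reset, VRAM-lost and guilty flags as well
 * as changes in the RAS correctable (ce) and uncorrectable (ue) error
 * counters since the last query.
 */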
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	struct amdgpu_fpriv *fpriv, uint32_t id,
	union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned long ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query ue count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counter is monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query ce count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

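/*
 * Entry point for the DRM_AMDGPU_CTX ioctl: dispatches the alloc, free and
 * the two query operations.
 */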
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

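/*
 * Look up a context by handle and take a reference on it; the caller must
 * balance this with amdgpu_ctx_put().
 */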
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

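/*
 * Remember the fence of a submitted job in the entity's fence ring buffer.
 * The slot is seq & (amdgpu_sched_jobs - 1), so amdgpu_sched_jobs (which is
 * expected to be a power of two, given the masking) fences are kept per
 * entity, and the fence being replaced must already be signaled.  The
 * sequence number is returned through *handle when requested.
 */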
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t* handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

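/*
 * Look up the fence for a sequence number on an entity.  A seq of ~0ull
 * means "the most recently emitted fence".  Returns ERR_PTR(-EINVAL) for
 * sequence numbers that have not been emitted yet, NULL when the fence has
 * already been replaced in the ring buffer, or a new reference to the fence
 * otherwise.
 */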
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

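/*
 * Apply a priority override to all scheduler entities of the context, or
 * fall back to the initial priority when the override is UNSET.
 */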
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

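/*
 * Wait (interruptibly) for the fence occupying the ring buffer slot that
 * the next submission on this entity would overwrite, which limits a
 * context to at most amdgpu_sched_jobs outstanding submissions per entity.
 */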
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

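/*
 * Flush all scheduler entities of all contexts belonging to a file, e.g.
 * when the file descriptor is closed, consuming the given timeout as the
 * entities drain.
 */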
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

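/*
 * Tear down the scheduler entities of all contexts that are no longer
 * referenced anywhere else; contexts still holding extra references are
 * only reported.
 */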
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

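/*
 * Final teardown of the context manager: finalize the entities, drop the
 * reference held by each remaining handle and destroy the IDR and the lock.
 */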
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

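/*
 * Build the per-IP scheduler arrays (gfx, compute, sdma, vcn dec/enc and
 * jpeg) from the rings that are actually present, skipping harvested VCN
 * and JPEG instances.
 */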
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
		adev->gfx.num_gfx_sched++;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
		adev->gfx.num_compute_sched++;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
		adev->sdma.num_sdma_sched++;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
			&adev->vcn.inst[i].ring_dec.sched;
	}

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
				&adev->vcn.inst[i].ring_enc[j].sched;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;
		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
			&adev->jpeg.inst[i].ring_dec.sched;
	}
}