// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

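/*
 * A resource map entry of 0 means the block is free; a non-zero entry is
 * the id of the DRM encoder that owns it. A block already owned by the
 * requesting encoder is not treated as "reserved by other", so the same
 * encoder can re-run a reservation without releasing first.
 */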
static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
		struct dpu_hw_merge_3d *hw;

		if (rm->merge_3d_blks[i]) {
			hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
			dpu_hw_merge_3d_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
		dpu_hw_intf_destroy(rm->hw_intf[i]);

	for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
		struct dpu_hw_dsc *hw;

		if (rm->dsc_blks[i]) {
			hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
			dpu_hw_dsc_destroy(hw);
		}
	}

	for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
		dpu_hw_wb_destroy(rm->hw_wb[i]);

	return 0;
}

int dpu_rm_init(struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear resource manager state */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
			DPU_ERROR("skip merge_3d %d with invalid id\n", merge_3d->id);
			continue;
		}
		hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

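	/*
	 * Pingpong blocks are set up after the merge_3d blocks above, so
	 * that each pingpong can be linked to the merge_3d block it is
	 * wired to.
	 */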
	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		if (wb->id < WB_0 || wb->id >= WB_MAX) {
			DPU_ERROR("skip wb %d with invalid id\n", wb->id);
			continue;
		}

		hw = dpu_hw_wb_init(wb->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

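	/*
	 * rc is 0 if a helper returned NULL rather than an ERR_PTR() value
	 * (PTR_ERR(NULL) == 0, see the IS_ERR_OR_NULL() check above); map
	 * that case to -EFAULT so the caller still sees an error.
	 */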
	return rc ? rc : -EFAULT;
}

static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *	   rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting the allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[]; the function
 *      checks whether the lm and all hardwired blocks connected to it
 *      (pp, dspp) are available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *      mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *      mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *      datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->id);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->id);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

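		/*
		 * Restart from this candidate primary: any peers collected
		 * for a previous candidate are discarded.
		 */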
		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

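	/*
	 * Require an exact match between the topology and the CTL: a
	 * split-display topology must get a CTL with the split-display
	 * capability, and a single-interface topology must not take one.
	 */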
	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

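/*
 * Unlike the mixer and CTL searches above, DSC blocks are taken from
 * index 0 upward: the first num_dsc blocks must exist and all be free.
 */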
static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	int num_dsc = top->num_dsc;
	int i;

	/* check that the required DSC blocks exist and are unallocated */
	for (i = 0; i < num_dsc; i++) {
		if (!rm->dsc_blks[i]) {
			DPU_ERROR("DSC %d does not exist\n", i);
			return -EIO;
		}

		if (global_state->dsc_to_enc_id[i]) {
			DPU_ERROR("DSC %d is already allocated\n", i);
			return -EIO;
		}
	}

	for (i = 0; i < num_dsc; i++)
		global_state->dsc_to_enc_id[i] = enc->base.id;

	return 0;
}

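/*
 * Reserve the datapath in a fixed order: layer mixers (with their
 * hardwired pingpong/dspp blocks) first, then CTLs, then DSC blocks.
 * Note that a failure can leave the earlier, partial assignments behind
 * in global_state; nothing here rolls them back.
 */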
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_dsc,
		      reqs->topology.num_intf);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}

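/*
 * Typical lifecycle (a sketch, based on the encoder code paths that use
 * this file): dpu_rm_reserve() is called with the candidate atomic global
 * state while a modeset is being checked, and dpu_rm_release() is called
 * on teardown, returning every block mapped to enc->base.id to the pool.
 */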
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

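/*
 * dpu_rm_get_assigned_resources() fills blks[] with up to blks_size blocks
 * of the given type currently mapped to enc_id and returns how many were
 * found. A caller might look like this (illustrative sketch only; the
 * variable names and MAX_CHANNELS_PER_ENC bound are assumptions, not taken
 * from this file):
 *
 *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 *	int num_pp;
 *
 *	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
 *			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 */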
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}