// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"


static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
		struct dpu_hw_merge_3d *hw;

		if (rm->merge_3d_blks[i]) {
			hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
			dpu_hw_merge_3d_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
		dpu_hw_intf_destroy(rm->hw_intf[i]);

	for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
		struct dpu_hw_dsc *hw;

		if (rm->dsc_blks[i]) {
			hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
			dpu_hw_dsc_destroy(hw);
		}
	}

	for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
		dpu_hw_wb_destroy(rm->hw_wb[i]);

	for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
		dpu_hw_sspp_destroy(rm->hw_sspp[i]);

	return 0;
}

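/**
 * dpu_rm_init - interrogate the HW catalog and create tracking objects
 *	for all the HW blocks listed in it
 * @rm: DPU resource manager to populate
 * @cat: pointer to the SoC hardware catalog
 * @mmio: mapped register I/O address of MDP
 *
 * Return: 0 on success, error code on failure
 */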
int dpu_rm_init(struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		hw = dpu_hw_lm_init(lm, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		hw = dpu_hw_merge_3d_init(merge_3d, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		hw = dpu_hw_pingpong_init(pp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		hw = dpu_hw_intf_init(intf, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		hw = dpu_hw_wb_init(wb, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		hw = dpu_hw_dspp_init(dspp, mmio);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
			hw = dpu_hw_dsc_init_1_2(dsc, mmio);
		else
			hw = dpu_hw_dsc_init(dsc, mmio);

		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		hw = dpu_hw_sspp_init(sspp, mmio, cat->ubwc);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}

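/*
 * A topology driving more than one interface is treated as a split
 * display: the CTL blocks reserved for it must advertise
 * DPU_CTL_SPLIT_DISPLAY support (see _dpu_rm_reserve_ctls()).
 */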
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *          rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if lm, and all other hardwired blocks connected to the lm (pp) are
 *	available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *	datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

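/*
 * CTL blocks are matched strictly on the split-display capability bit:
 * a topology spanning multiple interfaces only accepts CTLs that have
 * DPU_CTL_SPLIT_DISPLAY set, and a single-interface topology only
 * accepts CTLs without it.
 */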
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	int num_dsc = top->num_dsc;
	int i;

	/* check if the required DSC blocks are unallocated */
	for (i = 0; i < num_dsc; i++) {
		if (!rm->dsc_blks[i]) {
			DPU_ERROR("DSC %d does not exist\n", i);
			return -EIO;
		}

		if (global_state->dsc_to_enc_id[i]) {
			DPU_ERROR("DSC %d is already allocated\n", i);
			return -EIO;
		}
	}

	for (i = 0; i < num_dsc; i++)
		global_state->dsc_to_enc_id[i] = enc->base.id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
			&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_dsc,
		      reqs->topology.num_intf);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}

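/**
 * dpu_rm_reserve - given the encoder and the topology requested for it,
 *	check availability and reserve the required HW blocks in the
 *	shared global state
 * @rm: DPU resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM encoder handle
 * @crtc_state: proposed atomic DRM CRTC state handle
 * @topology: display topology requirements
 *
 * Reserved blocks can later be looked up through
 * dpu_rm_get_assigned_resources() and are released with dpu_rm_release().
 *
 * Return: 0 on success, error code on failure
 */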
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}