// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
};

int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
		struct dpu_hw_dspp *hw;

		if (rm->dspp_blks[i]) {
			hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
			dpu_hw_dspp_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
		struct dpu_hw_merge_3d *hw;

		if (rm->merge_3d_blks[i]) {
			hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
			dpu_hw_merge_3d_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
		dpu_hw_intf_destroy(rm->hw_intf[i]);

	for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
		struct dpu_hw_dsc *hw;

		if (rm->dsc_blks[i]) {
			hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
			dpu_hw_dsc_destroy(hw);
		}
	}

	for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
		dpu_hw_wb_destroy(rm->hw_wb[i]);

	for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
		dpu_hw_sspp_destroy(rm->hw_sspp[i]);

	return 0;
}

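/**
 * dpu_rm_init - interrogate the HW catalog and create tracking objects
 *	for all the HW blocks it describes
 * @rm: DPU Resource Manager handle
 * @cat: pointer to the hardware catalog
 * @mmio: mapped register I/O address of MDP
 * Return: 0 on success, negative errno on failure
 */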
int dpu_rm_init(struct dpu_rm *rm,
		const struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;
	}

	for (i = 0; i < cat->merge_3d_count; i++) {
		struct dpu_hw_merge_3d *hw;
		const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];

		if (merge_3d->id < MERGE_3D_0 || merge_3d->id >= MERGE_3D_MAX) {
			DPU_ERROR("skip merge_3d %d with invalid id\n",
				  merge_3d->id);
			continue;
		}
		hw = dpu_hw_merge_3d_init(merge_3d->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed merge_3d object creation: err %d\n",
				  rc);
			goto fail;
		}
		rm->merge_3d_blks[merge_3d->id - MERGE_3D_0] = &hw->base;
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				  rc);
			goto fail;
		}
		if (pp->merge_3d && pp->merge_3d < MERGE_3D_MAX)
			hw->merge_3d = to_dpu_hw_merge_3d(rm->merge_3d_blks[pp->merge_3d - MERGE_3D_0]);
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_intf[intf->id - INTF_0] = hw;
	}

	for (i = 0; i < cat->wb_count; i++) {
		struct dpu_hw_wb *hw;
		const struct dpu_wb_cfg *wb = &cat->wb[i];

		if (wb->id < WB_0 || wb->id >= WB_MAX) {
			DPU_ERROR("skip wb %d with invalid id\n", wb->id);
			continue;
		}

		hw = dpu_hw_wb_init(wb->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed wb object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_wb[wb->id - WB_0] = hw;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	for (i = 0; i < cat->dspp_count; i++) {
		struct dpu_hw_dspp *hw;
		const struct dpu_dspp_cfg *dspp = &cat->dspp[i];

		if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
			DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
			continue;
		}
		hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
	}

	for (i = 0; i < cat->dsc_count; i++) {
		struct dpu_hw_dsc *hw;
		const struct dpu_dsc_cfg *dsc = &cat->dsc[i];

		hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed dsc object creation: err %d\n", rc);
			goto fail;
		}
		rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
	}

	for (i = 0; i < cat->sspp_count; i++) {
		struct dpu_hw_sspp *hw;
		const struct dpu_sspp_cfg *sspp = &cat->sspp[i];

		if (sspp->id < SSPP_NONE || sspp->id >= SSPP_MAX) {
			DPU_ERROR("skip sspp %d with invalid id\n", sspp->id);
			continue;
		}

		hw = dpu_hw_sspp_init(sspp->id, mmio, cat);
		if (IS_ERR(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed sspp object creation: err %d\n", rc);
			goto fail;
		}
		rm->hw_sspp[sspp->id - SSPP_NONE] = hw;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}

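/*
 * A topology that drives more than one INTF from a single encoder (e.g. a
 * dual-DSI panel) is treated as a split display: each INTF gets its own CTL,
 * and only CTL blocks advertising DPU_CTL_SPLIT_DISPLAY may serve it (see
 * _dpu_rm_reserve_ctls() below).
 */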
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *          rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
 *	proposed use case requirements, incl. hardwired dependent blocks like
 *	pingpong
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[], function checks
 *	if the lm, and all other hardwired blocks connected to the lm (pp),
 *	are available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[].
 * @dspp_idx: output parameter, index of dspp block attached to the layer
 *	mixer in rm->dspp_blks[].
 * @reqs: input parameter, rm requirements for HW blocks needed in the
 *	datapath.
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
		struct dpu_rm_requirements *reqs)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;

	if (!reqs->topology.num_dspp)
		return true;

	idx = lm_cfg->dspp - DSPP_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
		DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
		return false;
	}

	if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
				lm_cfg->dspp);
		return false;
	}
	*dspp_idx = idx;

	return true;
}

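/**
 * _dpu_rm_reserve_lms - attempt to reserve the requested number of mixers,
 *	walking rm->mixer_blks[] for a free primary mixer and then for peers
 *	from its lm_pair_mask, together with their hardwired pingpong (and,
 *	if requested, dspp) blocks
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @reqs: rm requirements for HW blocks needed in the datapath
 * Return: 0 on success, -ENAVAIL if no suitable set of mixers is free
 */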
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int dspp_idx[MAX_BLOCKS] = {0};
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count],
				&dspp_idx[lm_count], reqs)) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n", LM_0 + j,
						LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count], &dspp_idx[lm_count],
					reqs)) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
		global_state->dspp_to_enc_id[dspp_idx[i]] =
			reqs->topology.num_dspp ? enc_id : 0;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}

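/**
 * _dpu_rm_reserve_ctls - reserve one CTL block per requested INTF; a CTL
 *	only matches when its DPU_CTL_SPLIT_DISPLAY capability agrees with
 *	whether the topology needs a split display
 * @rm: dpu resource manager handle
 * @global_state: resources shared across multiple kms objects
 * @enc_id: encoder id requesting for allocation
 * @top: selected topology for the display
 * Return: 0 on success, -ENAVAIL if not enough matching CTLs are free
 */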
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", j + CTL_0, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       struct drm_encoder *enc,
			       const struct msm_display_topology *top)
{
	int num_dsc = top->num_dsc;
	int i;

	/* check that the required DSC blocks exist and are not already allocated */
	for (i = 0; i < num_dsc; i++) {
		if (!rm->dsc_blks[i]) {
			DPU_ERROR("DSC %d does not exist\n", i);
			return -EIO;
		}

		if (global_state->dsc_to_enc_id[i]) {
			DPU_ERROR("DSC %d is already allocated\n", i);
			return -EIO;
		}
	}

	for (i = 0; i < num_dsc; i++)
		global_state->dsc_to_enc_id[i] = enc->base.id;

	return 0;
}

static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
			&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_dsc,
		      reqs->topology.num_intf);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

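/**
 * dpu_rm_release - release all HW blocks previously reserved for the given
 *	encoder by clearing its id from every resource-to-encoder mapping
 * @global_state: resources shared across multiple kms objects
 * @enc: DRM encoder handle whose reservations are released
 */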
void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
}

int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_DSPP:
		hw_blks = rm->dspp_blks;
		hw_to_enc_id = global_state->dspp_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dspp_blks);
		break;
	case DPU_HW_BLK_DSC:
		hw_blks = rm->dsc_blks;
		hw_to_enc_id = global_state->dsc_to_enc_id;
		max_blks = ARRAY_SIZE(rm->dsc_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		if (!hw_blks[i]) {
			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
				  type, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}
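/*
 * Illustrative sketch (not part of this file): after a successful
 * dpu_rm_reserve(), an encoder would typically fetch its assigned blocks
 * through dpu_rm_get_assigned_resources(); hw_pp/num_pp below are
 * hypothetical locals:
 *
 *	struct dpu_hw_blk *hw_pp[MAX_BLOCKS];
 *	int num_pp;
 *
 *	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
 *			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 */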