// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

static inline bool reserved_by_other(uint32_t *res_map, int idx,
				     uint32_t enc_id)
{
	return res_map[idx] && res_map[idx] != enc_id;
}

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology: selected topology for the display
 * @hw_res: hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	struct msm_display_topology topology;
	struct dpu_encoder_hw_resources hw_res;
};

int dpu_rm_destroy(struct dpu_rm *rm)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
		struct dpu_hw_pingpong *hw;

		if (rm->pingpong_blks[i]) {
			hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
			dpu_hw_pingpong_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
		struct dpu_hw_mixer *hw;

		if (rm->mixer_blks[i]) {
			hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
			dpu_hw_lm_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
		struct dpu_hw_ctl *hw;

		if (rm->ctl_blks[i]) {
			hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
			dpu_hw_ctl_destroy(hw);
		}
	}
	for (i = 0; i < ARRAY_SIZE(rm->intf_blks); i++) {
		struct dpu_hw_intf *hw;

		if (rm->intf_blks[i]) {
			hw = to_dpu_hw_intf(rm->intf_blks[i]);
			dpu_hw_intf_destroy(hw);
		}
	}

	return 0;
}

int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio)
{
	int rc, i;

	if (!rm || !cat || !mmio) {
		DPU_ERROR("invalid input params\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_hw_mixer *hw;
		const struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		if (lm->id < LM_0 || lm->id >= LM_MAX) {
			DPU_ERROR("skip mixer %d with invalid id\n", lm->id);
			continue;
		}
		hw = dpu_hw_lm_init(lm->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed lm object creation: err %d\n", rc);
			goto fail;
		}
		rm->mixer_blks[lm->id - LM_0] = &hw->base;

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		struct dpu_hw_pingpong *hw;
		const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];

		if (pp->id < PINGPONG_0 || pp->id >= PINGPONG_MAX) {
			DPU_ERROR("skip pingpong %d with invalid id\n", pp->id);
			continue;
		}
		hw = dpu_hw_pingpong_init(pp->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed pingpong object creation: err %d\n",
				rc);
			goto fail;
		}
		rm->pingpong_blks[pp->id - PINGPONG_0] = &hw->base;
	}

	for (i = 0; i < cat->intf_count; i++) {
		struct dpu_hw_intf *hw;
		const struct dpu_intf_cfg *intf = &cat->intf[i];

		if (intf->type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}
		if (intf->id < INTF_0 || intf->id >= INTF_MAX) {
			DPU_ERROR("skip intf %d with invalid id\n", intf->id);
			continue;
		}
		hw = dpu_hw_intf_init(intf->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed intf object creation: err %d\n", rc);
			goto fail;
		}
		rm->intf_blks[intf->id - INTF_0] = &hw->base;
	}

	for (i = 0; i < cat->ctl_count; i++) {
		struct dpu_hw_ctl *hw;
		const struct dpu_ctl_cfg *ctl = &cat->ctl[i];

		if (ctl->id < CTL_0 || ctl->id >= CTL_MAX) {
			DPU_ERROR("skip ctl %d with invalid id\n", ctl->id);
			continue;
		}
		hw = dpu_hw_ctl_init(ctl->id, mmio, cat);
		if (IS_ERR_OR_NULL(hw)) {
			rc = PTR_ERR(hw);
			DPU_ERROR("failed ctl object creation: err %d\n", rc);
			goto fail;
		}
		rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc ? rc : -EFAULT;
}
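/*
 * Illustrative only (not part of this file): a minimal sketch of how a KMS
 * driver would be expected to pair dpu_rm_init() with dpu_rm_destroy(); the
 * surrounding probe/teardown code and field names are assumptions:
 *
 *	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
 *	if (rc)
 *		return rc;	// init already destroyed partial state on failure
 *	...
 *	dpu_rm_destroy(&dpu_kms->rm);	// on driver teardown
 */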
static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

/**
 * _dpu_rm_check_lm_peer - check if a mixer is a peer of the primary
 * @rm: dpu resource manager handle
 * @primary_idx: index of primary mixer in rm->mixer_blks[]
 * @peer_idx: index of other mixer in rm->mixer_blks[]
 * Return: true if rm->mixer_blks[peer_idx] is a peer of
 *	rm->mixer_blks[primary_idx]
 */
static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
		int peer_idx)
{
	const struct dpu_lm_cfg *prim_lm_cfg;
	const struct dpu_lm_cfg *peer_cfg;

	prim_lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[primary_idx])->cap;
	peer_cfg = to_dpu_hw_mixer(rm->mixer_blks[peer_idx])->cap;

	if (!test_bit(peer_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
		DPU_DEBUG("lm %d not peer of lm %d\n", peer_cfg->id,
				prim_lm_cfg->id);
		return false;
	}
	return true;
}

/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer
 *	meets proposed use case requirements, incl. hardwired dependent blocks
 *	like pingpong
 * @rm: dpu resource manager handle
 * @global_state: resource state shared across kms objects
 * @enc_id: encoder id requesting the allocation
 * @lm_idx: index of proposed layer mixer in rm->mixer_blks[]; the function
 *	checks whether the lm and all hardwired blocks connected to it
 *	(pingpong) are available and appropriate
 * @pp_idx: output parameter, index of pingpong block attached to the layer
 *	mixer in rm->pingpong_blks[]
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id, int lm_idx, int *pp_idx)
{
	const struct dpu_lm_cfg *lm_cfg;
	int idx;

	/* Already reserved? */
	if (reserved_by_other(global_state->mixer_to_enc_id, lm_idx, enc_id)) {
		DPU_DEBUG("lm %d already reserved\n", lm_idx + LM_0);
		return false;
	}

	lm_cfg = to_dpu_hw_mixer(rm->mixer_blks[lm_idx])->cap;
	idx = lm_cfg->pingpong - PINGPONG_0;
	if (idx < 0 || idx >= ARRAY_SIZE(rm->pingpong_blks)) {
		DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
		return false;
	}

	if (reserved_by_other(global_state->pingpong_to_enc_id, idx, enc_id)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm_cfg->id,
				lm_cfg->pingpong);
		return false;
	}
	*pp_idx = idx;
	return true;
}

static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
			       struct dpu_global_state *global_state,
			       uint32_t enc_id,
			       struct dpu_rm_requirements *reqs)
{
	int lm_idx[MAX_BLOCKS];
	int pp_idx[MAX_BLOCKS];
	int i, j, lm_count = 0;

	if (!reqs->topology.num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
			lm_count < reqs->topology.num_lm; i++) {
		if (!rm->mixer_blks[i])
			continue;

		lm_count = 0;
		lm_idx[lm_count] = i;

		if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
				enc_id, i, &pp_idx[lm_count])) {
			continue;
		}

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		for (j = i + 1; j < ARRAY_SIZE(rm->mixer_blks) &&
				lm_count < reqs->topology.num_lm; j++) {
			if (!rm->mixer_blks[j])
				continue;

			if (!_dpu_rm_check_lm_peer(rm, i, j)) {
				DPU_DEBUG("lm %d not peer of lm %d\n",
						LM_0 + j, LM_0 + i);
				continue;
			}

			if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
					global_state, enc_id, j,
					&pp_idx[lm_count])) {
				continue;
			}

			lm_idx[lm_count] = j;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology.num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < lm_count; i++) {
		global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
		global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;

		trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
					 pp_idx[i] + PINGPONG_0);
	}

	return 0;
}
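/*
 * Illustrative walk-through (not part of this file), assuming a catalog in
 * which LM_0 pairs with LM_1: for a topology with num_lm == 2,
 * _dpu_rm_reserve_lms() first tries LM_0 as the primary; if LM_0 or its
 * hardwired pingpong is already taken, the search restarts with the next
 * populated mixer as primary, each time scanning only higher-indexed mixers
 * present in the primary's lm_pair_mask. The global mapping is written only
 * after a complete set of num_lm mixers with free pingpongs has been found,
 * so a failed search leaves global_state untouched.
 */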
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		const struct msm_display_topology *top)
{
	int ctl_idx[MAX_BLOCKS];
	int i = 0, j, num_ctls;
	bool needs_split_display;

	/* each hw_intf needs its own hw_ctl to program its control path */
	num_ctls = top->num_intf;

	needs_split_display = _dpu_rm_needs_split_display(top);

	for (j = 0; j < ARRAY_SIZE(rm->ctl_blks); j++) {
		const struct dpu_hw_ctl *ctl;
		unsigned long features;
		bool has_split_display;

		if (!rm->ctl_blks[j])
			continue;
		if (reserved_by_other(global_state->ctl_to_enc_id, j, enc_id))
			continue;

		ctl = to_dpu_hw_ctl(rm->ctl_blks[j]);
		features = ctl->caps->features;
		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", rm->ctl_blks[j]->id, features);

		if (needs_split_display != has_split_display)
			continue;

		ctl_idx[i] = j;
		DPU_DEBUG("ctl %d match\n", j + CTL_0);

		if (++i == num_ctls)
			break;
	}

	if (i != num_ctls)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctl_idx) && i < num_ctls; i++) {
		global_state->ctl_to_enc_id[ctl_idx[i]] = enc_id;
		trace_dpu_rm_reserve_ctls(ctl_idx[i] + CTL_0, enc_id);
	}

	return 0;
}

static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		uint32_t id)
{
	int idx = id - INTF_0;

	if (idx < 0 || idx >= ARRAY_SIZE(rm->intf_blks)) {
		DPU_ERROR("invalid intf id: %d\n", id);
		return -EINVAL;
	}

	if (!rm->intf_blks[idx]) {
		DPU_ERROR("couldn't find intf id %d\n", id);
		return -EINVAL;
	}

	if (reserved_by_other(global_state->intf_to_enc_id, idx, enc_id)) {
		DPU_ERROR("intf id %d already reserved\n", id);
		return -ENAVAIL;
	}

	global_state->intf_to_enc_id[idx] = enc_id;
	return 0;
}

static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		uint32_t enc_id,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, global_state, enc_id, id);
		if (ret)
			return ret;
	}

	return ret;
}
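/*
 * Illustrative only (assumed encoder behavior, not defined in this file):
 * dpu_encoder_get_hw_resources() fills hw_res->intfs[] indexed by
 * (intf id - INTF_0). A single-DSI encoder driving INTF_1 would leave, e.g.,
 * hw_res->intfs[1] == INTF_MODE_VIDEO and every other slot at
 * INTF_MODE_NONE, so only INTF_1 is reserved by the loop above.
 */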
static int _dpu_rm_make_reservation(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs)
{
	int ret;

	ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
				&reqs->topology);
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	return _dpu_rm_reserve_intf_related_hw(rm, global_state, enc->base.id,
				&reqs->hw_res);
}

static int _dpu_rm_populate_requirements(
		struct drm_encoder *enc,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	dpu_encoder_get_hw_resources(enc, &reqs->hw_res);

	reqs->topology = req_topology;

	DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
		      reqs->topology.num_lm, reqs->topology.num_enc,
		      reqs->topology.num_intf);

	return 0;
}

static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
				  uint32_t enc_id)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (res_mapping[i] == enc_id)
			res_mapping[i] = 0;
	}
}

void dpu_rm_release(struct dpu_global_state *global_state,
		    struct drm_encoder *enc)
{
	_dpu_rm_clear_mapping(global_state->pingpong_to_enc_id,
		ARRAY_SIZE(global_state->pingpong_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->mixer_to_enc_id,
		ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
	_dpu_rm_clear_mapping(global_state->intf_to_enc_id,
		ARRAY_SIZE(global_state->intf_to_enc_id), enc->base.id);
}
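/*
 * Illustrative only: the expected reserve/release pairing, assuming the
 * caller is the encoder's atomic path (call sites below are assumptions,
 * not taken from this file):
 *
 *	// during atomic check / modeset
 *	ret = dpu_rm_reserve(&dpu_kms->rm, global_state, drm_enc,
 *			     crtc_state, topology);
 *	...
 *	// on encoder disable, hand every block back by encoder id
 *	dpu_rm_release(global_state, drm_enc);
 *
 * Because reservations are keyed purely on enc->base.id, dpu_rm_release()
 * needs no knowledge of which specific blocks were picked.
 */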
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct dpu_global_state *global_state,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct msm_display_topology topology)
{
	struct dpu_rm_requirements reqs;
	int ret;

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	if (IS_ERR(global_state)) {
		DPU_ERROR("failed to get global state\n");
		return PTR_ERR(global_state);
	}

	DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
		      enc->base.id, crtc_state->crtc->base.id);

	ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		return ret;
	}

	ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
	if (ret)
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);

	return ret;
}

int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
	struct dpu_global_state *global_state, uint32_t enc_id,
	enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size)
{
	struct dpu_hw_blk **hw_blks;
	uint32_t *hw_to_enc_id;
	int i, num_blks, max_blks;

	switch (type) {
	case DPU_HW_BLK_PINGPONG:
		hw_blks = rm->pingpong_blks;
		hw_to_enc_id = global_state->pingpong_to_enc_id;
		max_blks = ARRAY_SIZE(rm->pingpong_blks);
		break;
	case DPU_HW_BLK_LM:
		hw_blks = rm->mixer_blks;
		hw_to_enc_id = global_state->mixer_to_enc_id;
		max_blks = ARRAY_SIZE(rm->mixer_blks);
		break;
	case DPU_HW_BLK_CTL:
		hw_blks = rm->ctl_blks;
		hw_to_enc_id = global_state->ctl_to_enc_id;
		max_blks = ARRAY_SIZE(rm->ctl_blks);
		break;
	case DPU_HW_BLK_INTF:
		hw_blks = rm->intf_blks;
		hw_to_enc_id = global_state->intf_to_enc_id;
		max_blks = ARRAY_SIZE(rm->intf_blks);
		break;
	default:
		DPU_ERROR("blk type %d not managed by rm\n", type);
		return 0;
	}

	num_blks = 0;
	for (i = 0; i < max_blks; i++) {
		if (hw_to_enc_id[i] != enc_id)
			continue;

		if (num_blks == blks_size) {
			DPU_ERROR("More than %d resources assigned to enc %d\n",
				  blks_size, enc_id);
			break;
		}
		blks[num_blks++] = hw_blks[i];
	}

	return num_blks;
}
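/*
 * Illustrative only: a minimal caller sketch for
 * dpu_rm_get_assigned_resources() (buffer size and variable names are
 * assumptions, not from this file). The return value is the number of
 * matching blocks, so the caller sizes its array for the worst case:
 *
 *	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 *	int num_pp;
 *
 *	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
 *			drm_enc->base.id, DPU_HW_BLK_PINGPONG,
 *			hw_pp, ARRAY_SIZE(hw_pp));
 */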