/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

#define RESERVED_BY_OTHER(h, r) \
		((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))

#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
				(t).num_comp_enc == (r).num_enc && \
				(t).num_intf == (r).num_intf)

struct dpu_rm_topology_def {
	enum dpu_rm_topology_name top_name;
	int num_lm;
	int num_comp_enc;
	int num_intf;
	int num_ctl;
	int needs_split_display;
};

static const struct dpu_rm_topology_def g_top_table[] = {
	{ DPU_RM_TOPOLOGY_NONE,			0, 0, 0, 0, false },
	{ DPU_RM_TOPOLOGY_SINGLEPIPE,		1, 0, 1, 1, false },
	{ DPU_RM_TOPOLOGY_DUALPIPE,		2, 0, 2, 2, true },
	{ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,	2, 0, 1, 1, false },
};
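/*
 * Illustrative example (not referenced by the code): an encoder requesting
 * msm_display_topology { .num_lm = 2, .num_enc = 0, .num_intf = 2 } matches
 * the DPU_RM_TOPOLOGY_DUALPIPE row of g_top_table via RM_IS_TOPOLOGY_MATCH(),
 * so a reservation for it needs 2 LMs, 2 CTLs (with split display support)
 * and 2 INTFs.
 */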
/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @top_ctrl:	topology control preference from kernel client
 * @topology:	selected topology for the display
 * @hw_res:	Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
	uint64_t top_ctrl;
	const struct dpu_rm_topology_def *topology;
	struct dpu_encoder_hw_resources hw_res;
};

/**
 * struct dpu_rm_rsvp - Use Case Reservation tagging structure
 *	Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain.
 *	By using a tag, rather than lists of pointers to the HW blocks used,
 *	we can avoid some list management, since we don't know how many blocks
 *	of each type a given use case may require.
 * @list:	List head for list of all reservations
 * @seq:	Global RSVP sequence number for debugging, especially for
 *		differentiating different allocations for the same encoder.
 * @enc_id:	Reservations are tracked by Encoder DRM object ID.
 *		CRTCs may be connected to multiple Encoders.
 *		An encoder or connector id identifies the display path.
 * @topology:	DRM<->HW topology use case
 */
struct dpu_rm_rsvp {
	struct list_head list;
	uint32_t seq;
	uint32_t enc_id;
	enum dpu_rm_topology_name topology;
};

/**
 * struct dpu_rm_hw_blk - hardware block tracking list member
 * @list:	List head for list of all hardware blocks tracking items
 * @rsvp:	Pointer to use case reservation if reserved by a client
 * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
 *		request. Will be swapped into rsvp if proposal is accepted
 * @type:	Type of hardware block this structure tracks
 * @id:		Hardware ID number, within its own space, e.g. LM_X
 * @hw:		Pointer to the hardware register access object for this block
 */
struct dpu_rm_hw_blk {
	struct list_head list;
	struct dpu_rm_rsvp *rsvp;
	struct dpu_rm_rsvp *rsvp_nxt;
	enum dpu_hw_blk_type type;
	uint32_t id;
	struct dpu_hw_blk *hw;
};

/**
 * enum dpu_rm_dbg_rsvp_stage - steps in making a reservation, for event logging
 */
enum dpu_rm_dbg_rsvp_stage {
	DPU_RM_STAGE_BEGIN,
	DPU_RM_STAGE_AFTER_CLEAR,
	DPU_RM_STAGE_AFTER_RSVPNEXT,
	DPU_RM_STAGE_FINAL
};

static void _dpu_rm_print_rsvps(
		struct dpu_rm *rm,
		enum dpu_rm_dbg_rsvp_stage stage)
{
	struct dpu_rm_rsvp *rsvp;
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	DPU_DEBUG("%d\n", stage);

	list_for_each_entry(rsvp, &rm->rsvps, list) {
		DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
			      rsvp->enc_id, rsvp->topology);
	}

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (!blk->rsvp && !blk->rsvp_nxt)
				continue;

			DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
				(blk->rsvp) ? blk->rsvp->seq : 0,
				(blk->rsvp) ? blk->rsvp->enc_id : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
				blk->type, blk->id);
		}
	}
}

struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
{
	return rm->hw_mdp;
}

enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology)
{
	int i;

	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
			return g_top_table[i].top_name;

	return DPU_RM_TOPOLOGY_NONE;
}

void dpu_rm_init_hw_iter(
		struct dpu_rm_hw_iter *iter,
		uint32_t enc_id,
		enum dpu_hw_blk_type type)
{
	memset(iter, 0, sizeof(*iter));
	iter->enc_id = enc_id;
	iter->type = type;
}

static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	struct list_head *blk_list;

	if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
		DPU_ERROR("invalid rm\n");
		return false;
	}

	i->hw = NULL;
	blk_list = &rm->hw_blks[i->type];

	if (i->blk && (&i->blk->list == blk_list)) {
		DPU_DEBUG("attempt resume iteration past last\n");
		return false;
	}

	i->blk = list_prepare_entry(i->blk, blk_list, list);

	list_for_each_entry_continue(i->blk, blk_list, list) {
		struct dpu_rm_rsvp *rsvp = i->blk->rsvp;

		if (i->blk->type != i->type) {
			DPU_ERROR("found incorrect block type %d on %d list\n",
				  i->blk->type, i->type);
			return false;
		}

		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
			i->hw = i->blk->hw;
			DPU_DEBUG("found type %d id %d for enc %d\n",
				  i->type, i->blk->id, i->enc_id);
			return true;
		}
	}

	DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

	return false;
}

bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
	bool ret;

	mutex_lock(&rm->rm_lock);
	ret = _dpu_rm_get_hw_locked(rm, i);
	mutex_unlock(&rm->rm_lock);

	return ret;
}
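/*
 * Example usage of the iterator (an illustrative sketch, not called in this
 * file): walk every pingpong block reserved for a given encoder. Passing
 * enc_id == 0 instead visits all blocks of the requested type, reserved or
 * not.
 *
 *	struct dpu_rm_hw_iter iter;
 *
 *	dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
 *	while (dpu_rm_get_hw(rm, &iter)) {
 *		struct dpu_hw_pingpong *pp = to_dpu_hw_pingpong(iter.hw);
 *
 *		... use pp ...
 *	}
 */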
static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
	switch (type) {
	case DPU_HW_BLK_LM:
		dpu_hw_lm_destroy(hw);
		break;
	case DPU_HW_BLK_CTL:
		dpu_hw_ctl_destroy(hw);
		break;
	case DPU_HW_BLK_CDM:
		dpu_hw_cdm_destroy(hw);
		break;
	case DPU_HW_BLK_PINGPONG:
		dpu_hw_pingpong_destroy(hw);
		break;
	case DPU_HW_BLK_INTF:
		dpu_hw_intf_destroy(hw);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		break;
	}
}

int dpu_rm_destroy(struct dpu_rm *rm)
{
	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
	enum dpu_hw_blk_type type;

	if (!rm) {
		DPU_ERROR("invalid rm\n");
		return -EINVAL;
	}

	list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
		list_del(&rsvp_cur->list);
		kfree(rsvp_cur);
	}

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
					 list) {
			list_del(&hw_cur->list);
			_dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
			kfree(hw_cur);
		}
	}

	dpu_hw_mdp_destroy(rm->hw_mdp);
	rm->hw_mdp = NULL;

	mutex_destroy(&rm->rm_lock);

	return 0;
}

static int _dpu_rm_hw_blk_create(
		struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio,
		enum dpu_hw_blk_type type,
		uint32_t id,
		void *hw_catalog_info)
{
	struct dpu_rm_hw_blk *blk;
	struct dpu_hw_mdp *hw_mdp;
	void *hw;

	hw_mdp = rm->hw_mdp;

	switch (type) {
	case DPU_HW_BLK_LM:
		hw = dpu_hw_lm_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_CTL:
		hw = dpu_hw_ctl_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_CDM:
		hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
		break;
	case DPU_HW_BLK_PINGPONG:
		hw = dpu_hw_pingpong_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_INTF:
		hw = dpu_hw_intf_init(id, mmio, cat);
		break;
	case DPU_HW_BLK_SSPP:
		/* SSPPs are not managed by the resource manager */
	case DPU_HW_BLK_TOP:
		/* Top is a singleton, not managed in hw_blks list */
	case DPU_HW_BLK_MAX:
	default:
		DPU_ERROR("unsupported block type %d\n", type);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(hw)) {
		DPU_ERROR("failed hw object creation: type %d, err %ld\n",
			  type, PTR_ERR(hw));
		return -EFAULT;
	}

	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
	if (!blk) {
		_dpu_rm_hw_destroy(type, hw);
		return -ENOMEM;
	}

	blk->type = type;
	blk->id = id;
	blk->hw = hw;
	list_add_tail(&blk->list, &rm->hw_blks[type]);

	return 0;
}
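/*
 * dpu_rm_init() below interrogates the HW catalog and creates one tracking
 * item per managed block. A sketch of the expected KMS-side pairing (the
 * dpu_kms field names here are assumptions, not defined in this file):
 *
 *	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
 *			 dpu_kms->dev);
 *	...
 *	dpu_rm_destroy(&dpu_kms->rm);
 */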
int dpu_rm_init(struct dpu_rm *rm,
		struct dpu_mdss_cfg *cat,
		void __iomem *mmio,
		struct drm_device *dev)
{
	int rc, i;
	enum dpu_hw_blk_type type;

	if (!rm || !cat || !mmio || !dev) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}

	/* Clear, setup lists */
	memset(rm, 0, sizeof(*rm));

	mutex_init(&rm->rm_lock);

	INIT_LIST_HEAD(&rm->rsvps);
	for (type = 0; type < DPU_HW_BLK_MAX; type++)
		INIT_LIST_HEAD(&rm->hw_blks[type]);

	rm->dev = dev;

	/* Some of the sub-blocks require an mdptop to be created */
	rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
		rc = PTR_ERR(rm->hw_mdp);
		rm->hw_mdp = NULL;
		DPU_ERROR("failed: mdp hw not available\n");
		goto fail;
	}

	/* Interrogate HW catalog and create tracking items for hw blocks */
	for (i = 0; i < cat->mixer_count; i++) {
		struct dpu_lm_cfg *lm = &cat->mixer[i];

		if (lm->pingpong == PINGPONG_MAX) {
			DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
				cat->mixer[i].id, &cat->mixer[i]);
		if (rc) {
			DPU_ERROR("failed: lm hw not available\n");
			goto fail;
		}

		if (!rm->lm_max_width) {
			rm->lm_max_width = lm->sblk->maxwidth;
		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
			/*
			 * Don't expect to have hw where lm max widths differ.
			 * If found, take the min.
			 */
			DPU_ERROR("unsupported: lm maxwidth differs\n");
			if (rm->lm_max_width > lm->sblk->maxwidth)
				rm->lm_max_width = lm->sblk->maxwidth;
		}
	}

	for (i = 0; i < cat->pingpong_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
				cat->pingpong[i].id, &cat->pingpong[i]);
		if (rc) {
			DPU_ERROR("failed: pp hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->intf_count; i++) {
		if (cat->intf[i].type == INTF_NONE) {
			DPU_DEBUG("skip intf %d with type none\n", i);
			continue;
		}

		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
				cat->intf[i].id, &cat->intf[i]);
		if (rc) {
			DPU_ERROR("failed: intf hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->ctl_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
				cat->ctl[i].id, &cat->ctl[i]);
		if (rc) {
			DPU_ERROR("failed: ctl hw not available\n");
			goto fail;
		}
	}

	for (i = 0; i < cat->cdm_count; i++) {
		rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
				cat->cdm[i].id, &cat->cdm[i]);
		if (rc) {
			DPU_ERROR("failed: cdm hw not available\n");
			goto fail;
		}
	}

	return 0;

fail:
	dpu_rm_destroy(rm);

	return rc;
}
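/*
 * Reservation overview (informal): dpu_rm_reserve() runs the helpers below
 * in order: layer mixers plus their hardwired pingpongs first, then CTLs,
 * then INTFs plus any CDM they require. Each stage only tags candidate
 * blocks through rsvp_nxt; the tags are promoted to rsvp in
 * _dpu_rm_commit_rsvp() once every stage has succeeded.
 */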
/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer
 *	meets the proposed use case requirements, incl. hardwired dependent
 *	blocks like pingpong
 * @rm: dpu resource manager handle
 * @rsvp: reservation currently being created
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer, function checks if lm, and all other hardwired
 *	blocks connected to the lm (pp) are available and appropriate
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *	NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function checks if lm is a compatible peer
 *	of primary_lm, as well as satisfying all other requirements
 * Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_rm_requirements *reqs,
		struct dpu_rm_hw_blk *lm,
		struct dpu_rm_hw_blk **pp,
		struct dpu_rm_hw_blk *primary_lm)
{
	const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
	struct dpu_rm_hw_iter iter;

	*pp = NULL;

	DPU_DEBUG("check lm %d pp %d\n",
		  lm_cfg->id, lm_cfg->pingpong);

	/* Check if this layer mixer is a peer of the proposed primary LM */
	if (primary_lm) {
		const struct dpu_lm_cfg *prim_lm_cfg =
				to_dpu_hw_mixer(primary_lm->hw)->cap;

		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
			DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
				  prim_lm_cfg->id);
			return false;
		}
	}

	/* Already reserved? */
	if (RESERVED_BY_OTHER(lm, rsvp)) {
		DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
		return false;
	}

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id == lm_cfg->pingpong) {
			*pp = iter.blk;
			break;
		}
	}

	if (!*pp) {
		DPU_ERROR("failed to get pp %d on lm %d\n", lm_cfg->pingpong,
			  lm_cfg->id);
		return false;
	}

	if (RESERVED_BY_OTHER(*pp, rsvp)) {
		DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
			  (*pp)->id);
		return false;
	}

	return true;
}

static int _dpu_rm_reserve_lms(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_rm_requirements *reqs)
{
	struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
	struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter_i, iter_j;
	int lm_count = 0;
	int i, rc = 0;

	if (!reqs->topology->num_lm) {
		DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
		return -EINVAL;
	}

	/* Find a primary mixer */
	dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
	while (lm_count != reqs->topology->num_lm &&
	       _dpu_rm_get_hw_locked(rm, &iter_i)) {
		memset(&lm, 0, sizeof(lm));
		memset(&pp, 0, sizeof(pp));

		lm_count = 0;
		lm[lm_count] = iter_i.blk;

		if (!_dpu_rm_check_lm_and_get_connected_blks(
				rm, rsvp, reqs, lm[lm_count],
				&pp[lm_count], NULL))
			continue;

		++lm_count;

		/* Valid primary mixer found, find matching peers */
		dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);

		while (lm_count != reqs->topology->num_lm &&
		       _dpu_rm_get_hw_locked(rm, &iter_j)) {
			if (iter_i.blk == iter_j.blk)
				continue;

			if (!_dpu_rm_check_lm_and_get_connected_blks(
					rm, rsvp, reqs, iter_j.blk,
					&pp[lm_count], iter_i.blk))
				continue;

			lm[lm_count] = iter_j.blk;
			++lm_count;
		}
	}

	if (lm_count != reqs->topology->num_lm) {
		DPU_DEBUG("unable to find appropriate mixers\n");
		return -ENAVAIL;
	}

	for (i = 0; i < ARRAY_SIZE(lm); i++) {
		if (!lm[i])
			break;

		lm[i]->rsvp_nxt = rsvp;
		pp[i]->rsvp_nxt = rsvp;

		trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
					 pp[i]->id);
	}

	return rc;
}
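/*
 * Pairing example (illustrative): for a DUALPIPE request (num_lm == 2), the
 * outer loop in _dpu_rm_reserve_lms() proposes, say, LM_0 as the primary
 * mixer; the inner loop then accepts a second mixer only if its id is set
 * in LM_0's lm_pair_mask, so only catalog-declared peers (and their
 * pingpongs) end up tagged together in one reservation.
 */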
static int _dpu_rm_reserve_ctls(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		const struct dpu_rm_topology_def *top)
{
	struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
	struct dpu_rm_hw_iter iter;
	int i = 0;

	memset(&ctls, 0, sizeof(ctls));

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
		unsigned long features = ctl->caps->features;
		bool has_split_display;

		if (RESERVED_BY_OTHER(iter.blk, rsvp))
			continue;

		has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

		DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

		if (top->needs_split_display != has_split_display)
			continue;

		ctls[i] = iter.blk;
		DPU_DEBUG("ctl %d match\n", iter.blk->id);

		if (++i == top->num_ctl)
			break;
	}

	if (i != top->num_ctl)
		return -ENAVAIL;

	for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
		ctls[i]->rsvp_nxt = rsvp;
		trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
					  rsvp->enc_id);
	}

	return 0;
}

static int _dpu_rm_reserve_cdm(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		uint32_t id,
		enum dpu_hw_blk_type type)
{
	struct dpu_rm_hw_iter iter;

	DRM_DEBUG_KMS("type %d id %d\n", type, id);

	dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
		const struct dpu_cdm_cfg *caps = cdm->caps;
		bool match = false;

		if (RESERVED_BY_OTHER(iter.blk, rsvp))
			continue;

		if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
			match = test_bit(id, &caps->intf_connect);

		DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
			      iter.blk->type, iter.blk->id, rsvp->enc_id,
			      caps->intf_connect, match);

		if (!match)
			continue;

		trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
					 rsvp->enc_id);
		iter.blk->rsvp_nxt = rsvp;
		break;
	}

	if (!iter.hw) {
		DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
		return -ENAVAIL;
	}

	return 0;
}

static int _dpu_rm_reserve_intf(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		uint32_t id,
		enum dpu_hw_blk_type type,
		bool needs_cdm)
{
	struct dpu_rm_hw_iter iter;
	int ret = 0;

	/* Find the block entry in the rm, and note the reservation */
	dpu_rm_init_hw_iter(&iter, 0, type);
	while (_dpu_rm_get_hw_locked(rm, &iter)) {
		if (iter.blk->id != id)
			continue;

		if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
			DPU_ERROR("type %d id %d already reserved\n", type, id);
			return -ENAVAIL;
		}

		iter.blk->rsvp_nxt = rsvp;
		trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
					  rsvp->enc_id);
		break;
	}

	/* Shouldn't happen since intfs are fixed at probe */
	if (!iter.hw) {
		DPU_ERROR("couldn't find type %d id %d\n", type, id);
		return -EINVAL;
	}

	if (needs_cdm)
		ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);

	return ret;
}

static int _dpu_rm_reserve_intf_related_hw(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_encoder_hw_resources *hw_res)
{
	int i, ret = 0;
	u32 id;

	for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
		if (hw_res->intfs[i] == INTF_MODE_NONE)
			continue;
		id = i + INTF_0;
		ret = _dpu_rm_reserve_intf(rm, rsvp, id,
				DPU_HW_BLK_INTF, hw_res->needs_cdm);
		if (ret)
			return ret;
	}

	return ret;
}
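/*
 * Example (illustrative): an encoder that reports
 * hw_res->intfs[0] = INTF_MODE_VIDEO and leaves the remaining slots at
 * INTF_MODE_NONE makes the loop above reserve only id = 0 + INTF_0, i.e.
 * INTF_0, plus one connectable CDM block when hw_res->needs_cdm is set.
 */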
static int _dpu_rm_make_next_rsvp(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		struct dpu_rm_rsvp *rsvp,
		struct dpu_rm_requirements *reqs)
{
	int ret;
	struct dpu_rm_topology_def topology;

	/* Create reservation info, tag reserved blocks with it as we go */
	rsvp->seq = ++rm->rsvp_next_seq;
	rsvp->enc_id = enc->base.id;
	rsvp->topology = reqs->topology->top_name;
	list_add_tail(&rsvp->list, &rm->rsvps);

	ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
	if (ret) {
		DPU_ERROR("unable to find appropriate mixers\n");
		return ret;
	}

	/*
	 * Do assignment preferring to give away low-resource CTLs first:
	 * - Check CTLs without split display capability first
	 * - Only then allow to grab from CTLs with split display capability
	 */
	ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
	if (ret && !reqs->topology->needs_split_display) {
		memcpy(&topology, reqs->topology, sizeof(topology));
		topology.needs_split_display = true;
		ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
	}
	if (ret) {
		DPU_ERROR("unable to find appropriate CTL\n");
		return ret;
	}

	/* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
	ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
	if (ret)
		return ret;

	return ret;
}

static int _dpu_rm_populate_requirements(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		struct dpu_rm_requirements *reqs,
		struct msm_display_topology req_topology)
{
	int i;

	memset(reqs, 0, sizeof(*reqs));

	dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);

	for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
		if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
					 req_topology)) {
			reqs->topology = &g_top_table[i];
			break;
		}
	}

	if (!reqs->topology) {
		DPU_ERROR("invalid topology for the display\n");
		return -EINVAL;
	}

	/*
	 * Set the requirement based on caps if not set from user space.
	 * This ensures an LM tied to a DS block is selected.
	 * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
	 */
	if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
	    conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
		reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);

	DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
		      reqs->hw_res.display_num_of_h_tiles);
	DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
		      reqs->topology->num_lm, reqs->topology->num_ctl,
		      reqs->topology->top_name,
		      reqs->topology->needs_split_display);

	return 0;
}

static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
		struct dpu_rm *rm,
		struct drm_encoder *enc)
{
	struct dpu_rm_rsvp *i;

	if (!rm || !enc) {
		DPU_ERROR("invalid params\n");
		return NULL;
	}

	if (list_empty(&rm->rsvps))
		return NULL;

	list_for_each_entry(i, &rm->rsvps, list)
		if (i->enc_id == enc->base.id)
			return i;

	return NULL;
}

static struct drm_connector *_dpu_rm_get_connector(
		struct drm_encoder *enc)
{
	struct drm_connector *conn = NULL;
	struct list_head *connector_list =
			&enc->dev->mode_config.connector_list;

	list_for_each_entry(conn, connector_list, head)
		if (conn->encoder == enc)
			return conn;

	return NULL;
}
/**
 * _dpu_rm_release_rsvp - release resources and remove a reservation
 * @rm: KMS handle
 * @rsvp: RSVP pointer to release the resources for
 * @conn: connector the reservation belongs to (currently unused)
 */
static void _dpu_rm_release_rsvp(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct drm_connector *conn)
{
	struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;

	if (!rsvp)
		return;

	DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);

	list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
		if (rsvp == rsvp_c) {
			list_del(&rsvp_c->list);
			break;
		}
	}

	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp == rsvp) {
				blk->rsvp = NULL;
				DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
					  rsvp->seq, rsvp->enc_id,
					  blk->type, blk->id);
			}
			if (blk->rsvp_nxt == rsvp) {
				blk->rsvp_nxt = NULL;
				DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
					  rsvp->seq, rsvp->enc_id,
					  blk->type, blk->id);
			}
		}
	}

	kfree(rsvp);
}

void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
	struct dpu_rm_rsvp *rsvp;
	struct drm_connector *conn;

	if (!rm || !enc) {
		DPU_ERROR("invalid params\n");
		return;
	}

	mutex_lock(&rm->rm_lock);

	rsvp = _dpu_rm_get_rsvp(rm, enc);
	if (!rsvp) {
		DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
		goto end;
	}

	conn = _dpu_rm_get_connector(enc);
	if (!conn) {
		DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
		goto end;
	}

	_dpu_rm_release_rsvp(rm, rsvp, conn);
end:
	mutex_unlock(&rm->rm_lock);
}

static int _dpu_rm_commit_rsvp(
		struct dpu_rm *rm,
		struct dpu_rm_rsvp *rsvp,
		struct drm_connector_state *conn_state)
{
	struct dpu_rm_hw_blk *blk;
	enum dpu_hw_blk_type type;
	int ret = 0;

	/* Swap next rsvp to be the active one */
	for (type = 0; type < DPU_HW_BLK_MAX; type++) {
		list_for_each_entry(blk, &rm->hw_blks[type], list) {
			if (blk->rsvp_nxt) {
				blk->rsvp = blk->rsvp_nxt;
				blk->rsvp_nxt = NULL;
			}
		}
	}

	if (!ret)
		DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
			      rsvp->topology);

	return ret;
}
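/*
 * Typical call sequence for dpu_rm_reserve() below (a sketch; the
 * encoder-side call sites are assumptions, not defined in this file):
 * an encoder's atomic_check path calls it with test_only = true to
 * validate the proposed state, the modeset path calls it again with
 * test_only = false to actually claim the blocks, and dpu_rm_release()
 * returns them when the display path is torn down.
 */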
int dpu_rm_reserve(
		struct dpu_rm *rm,
		struct drm_encoder *enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state,
		struct msm_display_topology topology,
		bool test_only)
{
	struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
	struct dpu_rm_requirements reqs;
	int ret;

	if (!rm || !enc || !crtc_state || !conn_state) {
		DPU_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	/* Check if this is just a page-flip */
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return 0;

	DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
		      conn_state->connector->base.id, enc->base.id,
		      crtc_state->crtc->base.id, test_only);

	mutex_lock(&rm->rm_lock);

	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);

	ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
			conn_state, &reqs, topology);
	if (ret) {
		DPU_ERROR("failed to populate hw requirements\n");
		goto end;
	}

	/*
	 * We only support one active reservation per-hw-block. But to
	 * implement transactional semantics for test-only, and for allowing
	 * failure while modifying your existing reservation, over the course
	 * of this function we can have two reservations:
	 * Current: Existing reservation
	 * Next: Proposed reservation. The proposed reservation may fail, or
	 *	 may be discarded if in test-only mode.
	 * If reservation is successful, and we're not in test-only, then we
	 * replace the current with the next.
	 */
	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
	if (!rsvp_nxt) {
		ret = -ENOMEM;
		goto end;
	}

	rsvp_cur = _dpu_rm_get_rsvp(rm, enc);

	/*
	 * User can request that we clear out any reservation during the
	 * atomic_check phase by using this CLEAR bit
	 */
	if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
		DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
			  rsvp_cur->seq, rsvp_cur->enc_id);
		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
		rsvp_cur = NULL;
		_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
	}

	/* Check the proposed reservation, store it in hw's "next" field */
	ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
			rsvp_nxt, &reqs);

	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);

	if (ret) {
		DPU_ERROR("failed to reserve hw resources: %d\n", ret);
		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
	} else if (test_only && !RM_RQ_LOCK(&reqs)) {
		/*
		 * Normally, if test_only, test the reservation and then undo.
		 * However, if the user requests LOCK, then keep the
		 * reservation made during the atomic_check phase.
		 */
		DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
			  rsvp_nxt->seq, rsvp_nxt->enc_id);
		_dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
	} else {
		if (test_only && RM_RQ_LOCK(&reqs))
			DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
				  rsvp_nxt->seq, rsvp_nxt->enc_id);

		_dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);

		ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
	}

	_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);

end:
	mutex_unlock(&rm->rm_lock);

	return ret;
}