// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP				0x014
#define CTL_FLUSH			0x018
#define CTL_START			0x01C
#define CTL_PREPARE			0x0d0
#define CTL_SW_RESET			0x030
#define CTL_LAYER_EXTN_OFFSET		0x40
#define CTL_MERGE_3D_ACTIVE		0x0E4
#define CTL_WB_ACTIVE			0x0EC
#define CTL_INTF_ACTIVE			0x0F4
#define CTL_MERGE_3D_FLUSH		0x100
#define CTL_DSC_ACTIVE			0x0E8
#define CTL_DSC_FLUSH			0x104
#define CTL_WB_FLUSH			0x108
#define CTL_INTF_FLUSH			0x110
#define CTL_INTF_MASTER			0x134
#define CTL_FETCH_PIPE_ACTIVE		0x0FC

#define CTL_MIXER_BORDER_OUT		BIT(24)
#define CTL_FLUSH_MASK_CTL		BIT(17)

#define DPU_REG_RESET_TIMEOUT_US	2000
#define MERGE_3D_IDX			23
#define DSC_IDX				22
#define INTF_IDX			31
#define WB_IDX				16
#define CTL_INVALID_BIT			0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

/* per-SSPP bit position in CTL_FETCH_PIPE_ACTIVE; CTL_INVALID_BIT marks
 * pipes that have no fetch-active bit in that register
 */
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};

static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->blk_addr = addr + m->ctl[i].base;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
					     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}
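
/*
 * Return the flush bits accumulated by the update_pending_flush*() helpers
 * since the last clear; trigger_flush() writes this mask to CTL_FLUSH.
 */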
static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
			      ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
			      ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
			      ctx->pending_wb_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
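	/* the WB bit in CTL_FLUSH latches the dedicated CTL_WB_FLUSH register */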
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

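	/* a NULL stage_cfg detaches all pipes and leaves only border colour output */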
	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries group_id to collate CTL paths
	 * per VM. Explicitly disable it until VM support is
	 * added in SW. The power-on reset value of this field is not
	 * the disabled state.
	 */
	if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->dsc)
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
	if (cfg->dsc) {
		DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
	}
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;

	/*
	 * This API resets each portion of the CTL path, namely clearing
	 * the sspps staged on the lm, the merge_3d block, interfaces,
	 * writeback etc., to ensure a clean teardown of the pipeline.
	 * This will be used for writeback to begin with, to have a
	 * proper teardown of the writeback session, but upon further
	 * validation it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}
}

static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
			    fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}