// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT4(lm) \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define CTL_TOP				0x014
#define CTL_FLUSH			0x018
#define CTL_START			0x01C
#define CTL_PREPARE			0x0d0
#define CTL_SW_RESET			0x030
#define CTL_LAYER_EXTN_OFFSET		0x40
#define CTL_MERGE_3D_ACTIVE		0x0E4
#define CTL_WB_ACTIVE			0x0EC
#define CTL_INTF_ACTIVE			0x0F4
#define CTL_MERGE_3D_FLUSH		0x100
#define CTL_DSC_ACTIVE			0x0E8
#define CTL_DSC_FLUSH			0x104
#define CTL_WB_FLUSH			0x108
#define CTL_INTF_FLUSH			0x110
#define CTL_INTF_MASTER			0x134
#define CTL_FETCH_PIPE_ACTIVE		0x0FC

#define CTL_MIXER_BORDER_OUT		BIT(24)
#define CTL_FLUSH_MASK_CTL		BIT(17)

#define DPU_REG_RESET_TIMEOUT_US	2000
#define MERGE_3D_IDX			23
#define DSC_IDX				22
#define INTF_IDX			31
#define WB_IDX				16
#define CTL_INVALID_BIT			0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};

static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->blk_addr = addr + m->ctl[i].base;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
					     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

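/*
 * Flush bits are only accumulated in ctx->pending_flush_mask by the
 * update_pending_flush helpers; nothing reaches the hardware until one of
 * the trigger_flush ops writes the mask to CTL_FLUSH.
 */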
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
			      ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
			      ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
			      ctx->pending_wb_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
		enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
		enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

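/*
 * The *_v1 flush helpers serve DPU_CTL_ACTIVE_CFG hardware, where merge_3d,
 * interface and writeback blocks each have a dedicated flush register
 * (CTL_MERGE_3D_FLUSH, CTL_INTF_FLUSH, CTL_WB_FLUSH) and only a summary bit
 * (MERGE_3D_IDX, INTF_IDX, WB_IDX) in the main CTL_FLUSH register.
 */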
static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
		enum dpu_dspp dspp)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

struct ctl_blend_config {
	int idx, shift, ext_shift;
};

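/*
 * Layer-mixer staging layout per SSPP: @idx picks the mixercfg word that
 * holds the stage value (0 = CTL_LAYER, 1 = CTL_LAYER_EXT, 2 = CTL_LAYER_EXT2,
 * 3 = CTL_LAYER_EXT3, 4 = CTL_LAYER_EXT4), @shift is the bit offset of that
 * field, and @ext_shift is the offset of the overflow bit in CTL_LAYER_EXT for
 * pipes configured through CTL_LAYER. The second element of each pair is used
 * when the pipe is staged as DPU_SSPP_RECT_1; idx == -1 marks an unsupported
 * combination.
 */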
static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0 }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2 }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4 }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6 }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8 }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0 }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4 }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0 }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4 }, { 4, 12 } },
	[SSPP_CURSOR0] = { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] = { { 1, 26 }, { -1 } },
};

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		     &ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has a 3-bit field (plus extra bits in the EXT
			 * register), all EXT registers have 4-bit fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if (test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

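/*
 * Active-CTL (DPU_CTL_ACTIVE_CFG) interface configuration: CTL_INTF_ACTIVE
 * and CTL_WB_ACTIVE are updated read-modify-write so that a single CTL path
 * can keep several interfaces/writebacks active at once.
 */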
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries group_id to collate CTL paths
	 * per VM. Explicitly disable it until VM support is
	 * added in SW. The power-on reset value is not the disabled state.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->dsc)
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));
	if (cfg->dsc) {
		DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
	}
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 merge3d_active = 0;

	/*
	 * This API resets each portion of the CTL path, namely clearing
	 * the sspps staged on the lm, the merge_3d block, interfaces,
	 * writeback etc., to ensure clean teardown of the pipeline.
	 * To begin with, this is used for writeback to get a proper
	 * teardown of the writeback session, but upon further validation
	 * it can be extended to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}
}

static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
		unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
			    fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;
		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;
	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	kfree(ctx);
}