// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP				0x014
#define CTL_FLUSH			0x018
#define CTL_START			0x01C
#define CTL_PREPARE			0x0d0
#define CTL_SW_RESET			0x030
#define CTL_LAYER_EXTN_OFFSET		0x40
#define CTL_INTF_ACTIVE			0x0F4
#define CTL_INTF_FLUSH			0x110
#define CTL_INTF_MASTER			0x134

#define CTL_MIXER_BORDER_OUT		BIT(24)
#define CTL_FLUSH_MASK_CTL		BIT(17)

#define DPU_REG_RESET_TIMEOUT_US	2000
#define INTF_IDX			31

static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
					     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_intf_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}
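/*
 * On DPU_CTL_ACTIVE_CFG hardware, interface flush bits live in a
 * dedicated CTL_INTF_FLUSH register: if the INTF master bit (bit 31)
 * is pending, program the per-interface mask first, then write the
 * main CTL_FLUSH mask to apply the whole update.
 */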
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
			      ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
	case INTF_1:
		*flushbits |= BIT(31);
		break;
	default:
		return 0;
	}
	return 0;
}

static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(0);
		break;
	case INTF_1:
		*flushbits |= BIT(1);
		break;
	default:
		return 0;
	}
	return 0;
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}
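/*
 * Request a SW reset of the CTL path and wait for the hardware to clear
 * the CTL_SW_RESET bit. Returns -EINVAL if the reset does not complete
 * within DPU_REG_RESET_TIMEOUT_US.
 */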
static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * DMA2/DMA3 carry the full 4-bit stage
					 * value in EXT2; compute it directly
					 * rather than clobbering 'mix', which
					 * is reused by the next pipe in this
					 * stage.
					 */
					mixercfg_ext2 |= ((i + 1) & 0xF) << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
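/*
 * Active-CTL (v1) interface configuration: the operating mode (command
 * vs. video) is programmed into CTL_TOP, while the interface itself is
 * enabled via a read-modify-write of the CTL_INTF_ACTIVE bitmask.
 */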
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
		ops->get_bitmask_active_intf =
			dpu_hw_ctl_active_get_bitmask_intf;
		ops->update_pending_intf_flush =
			dpu_hw_ctl_update_pending_intf_flush;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
}

static struct dpu_hw_blk_ops dpu_hw_ops;
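/**
 * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
 * Should be called before accessing any ctl_path register.
 * @idx:  ctl_path index for which the driver object is required
 * @addr: mapped register io address of MDP
 * @m:    pointer to mdss catalog data
 */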
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}