/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_dbg.h"
#include "dpu_kms.h"

#define CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define CTL_TOP				0x014
#define CTL_FLUSH			0x018
#define CTL_START			0x01C
#define CTL_PREPARE			0x0d0
#define CTL_SW_RESET			0x030
#define CTL_LAYER_EXTN_OFFSET		0x40

#define CTL_MIXER_BORDER_OUT		BIT(24)
#define CTL_FLUSH_MASK_CTL		BIT(17)

#define DPU_REG_RESET_TIMEOUT_US	2000

static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	if (!ctx)
		return 0x0;

	return ctx->pending_flush_mask;
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}
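
/*
 * Illustrative flush sequence built only from the helpers above; a sketch
 * of how a caller might drive them, not the actual encoder/CRTC code path:
 *
 *	ctl->ops.clear_pending_flush(ctl);
 *	ctl->ops.update_pending_flush(ctl,
 *			ctl->ops.get_bitmask_mixer(ctl, LM_0));
 *	ctl->ops.trigger_flush(ctl);	(latches pending_flush_mask to HW)
 *	ctl->ops.trigger_start(ctl);	(kicks off the new frame)
 */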

static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_cdm cdm)
{
	switch (cdm) {
	case CDM_0:
		*flushbits |= BIT(26);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * it takes around 30us to have mdp finish resetting its ctl path
	 * poll every 50us so that reset should be completed at 1st poll
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}
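
/*
 * Layout of the blend-stage registers as implied by the writes in
 * dpu_hw_ctl_setup_blendstage() below (a summary of this file's own
 * code, not of a hardware spec):
 *
 *	CTL_LAYER(lm):      3-bit (stage + 1) fields for the legacy pipes
 *	                    (VIG0-3, RGB0-3, DMA0-1); 0 means not staged.
 *	CTL_LAYER_EXT(lm):  per-pipe overflow bit for stage indices >= 7,
 *	                    plus 4-bit fields for the cursor pipes.
 *	CTL_LAYER_EXT2(lm): 4-bit fields for DMA2/DMA3 and for RECT_1 of
 *	                    the multirect-capable DMA0/DMA1 pipes.
 *	CTL_LAYER_EXT3(lm): 4-bit RECT_1 fields for the VIG pipes.
 */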

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	/* must be signed: _mixer_stages() returns -EINVAL on failure */
	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
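
/*
 * Illustrative staging call, assuming the dpu_hw_stage_cfg layout from
 * dpu_hw_ctl.h (a sketch; real staging is built by the CRTC atomic code):
 *
 *	struct dpu_hw_stage_cfg stage_cfg = { 0 };
 *
 *	stage_cfg.stage[0][0] = SSPP_VIG0;	(bottom-most blend stage)
 *	stage_cfg.multirect_index[0][0] = DPU_SSPP_RECT_SOLO;
 *	ctl->ops.setup_blendstage(ctl, LM_0, &stage_cfg);
 */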

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->trigger_flush = dpu_hw_ctl_trigger_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
	ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
}

static struct dpu_hw_blk_ops dpu_hw_ops = {
	.start = NULL,
	.stop = NULL,
};

struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	struct dpu_ctl_cfg *cfg;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
	if (rc) {
		DPU_ERROR("failed to init hw blk %d\n", rc);
		goto blk_init_error;
	}

	return c;

blk_init_error:
	kzfree(c);

	return ERR_PTR(rc);
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}
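
/*
 * Illustrative lifecycle, assuming an mmio base and catalog already
 * mapped by the caller (a sketch; the real user is the DPU resource
 * manager):
 *
 *	struct dpu_hw_ctl *ctl = dpu_hw_ctl_init(CTL_0, mmio, catalog);
 *
 *	if (IS_ERR(ctl))
 *		return PTR_ERR(ctl);
 *	...
 *	ctl->ops.reset(ctl);
 *	...
 *	dpu_hw_ctl_destroy(ctl);
 */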