1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 4 * Author: James.Qian.Wang <james.qian.wang@arm.com> 5 * 6 */ 7 8 #include <drm/drm_print.h> 9 #include "d71_dev.h" 10 #include "malidp_io.h" 11 12 static u64 get_lpu_event(struct d71_pipeline *d71_pipeline) 13 { 14 u32 __iomem *reg = d71_pipeline->lpu_addr; 15 u32 status, raw_status; 16 u64 evts = 0ULL; 17 18 raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS); 19 if (raw_status & LPU_IRQ_IBSY) 20 evts |= KOMEDA_EVENT_IBSY; 21 if (raw_status & LPU_IRQ_EOW) 22 evts |= KOMEDA_EVENT_EOW; 23 24 if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) { 25 u32 restore = 0, tbu_status; 26 /* Check error of LPU status */ 27 status = malidp_read32(reg, BLK_STATUS); 28 if (status & LPU_STATUS_AXIE) { 29 restore |= LPU_STATUS_AXIE; 30 evts |= KOMEDA_ERR_AXIE; 31 } 32 if (status & LPU_STATUS_ACE0) { 33 restore |= LPU_STATUS_ACE0; 34 evts |= KOMEDA_ERR_ACE0; 35 } 36 if (status & LPU_STATUS_ACE1) { 37 restore |= LPU_STATUS_ACE1; 38 evts |= KOMEDA_ERR_ACE1; 39 } 40 if (status & LPU_STATUS_ACE2) { 41 restore |= LPU_STATUS_ACE2; 42 evts |= KOMEDA_ERR_ACE2; 43 } 44 if (status & LPU_STATUS_ACE3) { 45 restore |= LPU_STATUS_ACE3; 46 evts |= KOMEDA_ERR_ACE3; 47 } 48 if (restore != 0) 49 malidp_write32_mask(reg, BLK_STATUS, restore, 0); 50 51 restore = 0; 52 /* Check errors of TBU status */ 53 tbu_status = malidp_read32(reg, LPU_TBU_STATUS); 54 if (tbu_status & LPU_TBU_STATUS_TCF) { 55 restore |= LPU_TBU_STATUS_TCF; 56 evts |= KOMEDA_ERR_TCF; 57 } 58 if (tbu_status & LPU_TBU_STATUS_TTNG) { 59 restore |= LPU_TBU_STATUS_TTNG; 60 evts |= KOMEDA_ERR_TTNG; 61 } 62 if (tbu_status & LPU_TBU_STATUS_TITR) { 63 restore |= LPU_TBU_STATUS_TITR; 64 evts |= KOMEDA_ERR_TITR; 65 } 66 if (tbu_status & LPU_TBU_STATUS_TEMR) { 67 restore |= LPU_TBU_STATUS_TEMR; 68 evts |= KOMEDA_ERR_TEMR; 69 } 70 if (tbu_status & LPU_TBU_STATUS_TTF) { 71 restore |= LPU_TBU_STATUS_TTF; 72 evts |= KOMEDA_ERR_TTF; 73 } 74 if (restore 
!= 0) 75 malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0); 76 } 77 78 malidp_write32(reg, BLK_IRQ_CLEAR, raw_status); 79 return evts; 80 } 81 82 static u64 get_cu_event(struct d71_pipeline *d71_pipeline) 83 { 84 u32 __iomem *reg = d71_pipeline->cu_addr; 85 u32 status, raw_status; 86 u64 evts = 0ULL; 87 88 raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS); 89 if (raw_status & CU_IRQ_OVR) 90 evts |= KOMEDA_EVENT_OVR; 91 92 if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) { 93 status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF; 94 if (status & CU_STATUS_CPE) 95 evts |= KOMEDA_ERR_CPE; 96 if (status & CU_STATUS_ZME) 97 evts |= KOMEDA_ERR_ZME; 98 if (status & CU_STATUS_CFGE) 99 evts |= KOMEDA_ERR_CFGE; 100 if (status) 101 malidp_write32_mask(reg, BLK_STATUS, status, 0); 102 } 103 104 malidp_write32(reg, BLK_IRQ_CLEAR, raw_status); 105 106 return evts; 107 } 108 109 static u64 get_dou_event(struct d71_pipeline *d71_pipeline) 110 { 111 u32 __iomem *reg = d71_pipeline->dou_addr; 112 u32 status, raw_status; 113 u64 evts = 0ULL; 114 115 raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS); 116 if (raw_status & DOU_IRQ_PL0) 117 evts |= KOMEDA_EVENT_VSYNC; 118 if (raw_status & DOU_IRQ_UND) 119 evts |= KOMEDA_EVENT_URUN; 120 121 if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) { 122 u32 restore = 0; 123 124 status = malidp_read32(reg, BLK_STATUS); 125 if (status & DOU_STATUS_DRIFTTO) { 126 restore |= DOU_STATUS_DRIFTTO; 127 evts |= KOMEDA_ERR_DRIFTTO; 128 } 129 if (status & DOU_STATUS_FRAMETO) { 130 restore |= DOU_STATUS_FRAMETO; 131 evts |= KOMEDA_ERR_FRAMETO; 132 } 133 if (status & DOU_STATUS_TETO) { 134 restore |= DOU_STATUS_TETO; 135 evts |= KOMEDA_ERR_TETO; 136 } 137 if (status & DOU_STATUS_CSCE) { 138 restore |= DOU_STATUS_CSCE; 139 evts |= KOMEDA_ERR_CSCE; 140 } 141 142 if (restore != 0) 143 malidp_write32_mask(reg, BLK_STATUS, restore, 0); 144 } 145 146 malidp_write32(reg, BLK_IRQ_CLEAR, raw_status); 147 return evts; 148 } 149 150 static u64 get_pipeline_event(struct 
d71_pipeline *d71_pipeline, u32 gcu_status) 151 { 152 u32 evts = 0ULL; 153 154 if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1)) 155 evts |= get_lpu_event(d71_pipeline); 156 157 if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1)) 158 evts |= get_cu_event(d71_pipeline); 159 160 if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1)) 161 evts |= get_dou_event(d71_pipeline); 162 163 return evts; 164 } 165 166 static irqreturn_t 167 d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts) 168 { 169 struct d71_dev *d71 = mdev->chip_data; 170 u32 status, gcu_status, raw_status; 171 172 gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS); 173 174 if (gcu_status & GLB_IRQ_STATUS_GCU) { 175 raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS); 176 if (raw_status & GCU_IRQ_CVAL0) 177 evts->pipes[0] |= KOMEDA_EVENT_FLIP; 178 if (raw_status & GCU_IRQ_CVAL1) 179 evts->pipes[1] |= KOMEDA_EVENT_FLIP; 180 if (raw_status & GCU_IRQ_ERR) { 181 status = malidp_read32(d71->gcu_addr, BLK_STATUS); 182 if (status & GCU_STATUS_MERR) { 183 evts->global |= KOMEDA_ERR_MERR; 184 malidp_write32_mask(d71->gcu_addr, BLK_STATUS, 185 GCU_STATUS_MERR, 0); 186 } 187 } 188 189 malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status); 190 } 191 192 if (gcu_status & GLB_IRQ_STATUS_PIPE0) 193 evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status); 194 195 if (gcu_status & GLB_IRQ_STATUS_PIPE1) 196 evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status); 197 198 return IRQ_RETVAL(gcu_status); 199 } 200 201 #define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \ 202 GCU_IRQ_MODE | GCU_IRQ_ERR) 203 #define ENABLED_LPU_IRQS (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW) 204 #define ENABLED_CU_IRQS (CU_IRQ_OVR | CU_IRQ_ERR) 205 #define ENABLED_DOU_IRQS (DOU_IRQ_UND | DOU_IRQ_ERR) 206 207 static int d71_enable_irq(struct komeda_dev *mdev) 208 { 209 struct d71_dev *d71 = mdev->chip_data; 210 struct d71_pipeline *pipe; 211 u32 i; 212 213 
malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, 214 ENABLED_GCU_IRQS, ENABLED_GCU_IRQS); 215 for (i = 0; i < d71->num_pipelines; i++) { 216 pipe = d71->pipes[i]; 217 malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK, 218 ENABLED_CU_IRQS, ENABLED_CU_IRQS); 219 malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK, 220 ENABLED_LPU_IRQS, ENABLED_LPU_IRQS); 221 malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK, 222 ENABLED_DOU_IRQS, ENABLED_DOU_IRQS); 223 } 224 return 0; 225 } 226 227 static int d71_disable_irq(struct komeda_dev *mdev) 228 { 229 struct d71_dev *d71 = mdev->chip_data; 230 struct d71_pipeline *pipe; 231 u32 i; 232 233 malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0); 234 for (i = 0; i < d71->num_pipelines; i++) { 235 pipe = d71->pipes[i]; 236 malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK, 237 ENABLED_CU_IRQS, 0); 238 malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK, 239 ENABLED_LPU_IRQS, 0); 240 malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK, 241 ENABLED_DOU_IRQS, 0); 242 } 243 return 0; 244 } 245 246 static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on) 247 { 248 struct d71_dev *d71 = mdev->chip_data; 249 struct d71_pipeline *pipe = d71->pipes[master_pipe]; 250 251 malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK, 252 DOU_IRQ_PL0, on ? 
DOU_IRQ_PL0 : 0); 253 } 254 255 static int to_d71_opmode(int core_mode) 256 { 257 switch (core_mode) { 258 case KOMEDA_MODE_DISP0: 259 return DO0_ACTIVE_MODE; 260 case KOMEDA_MODE_DISP1: 261 return DO1_ACTIVE_MODE; 262 case KOMEDA_MODE_DUAL_DISP: 263 return DO01_ACTIVE_MODE; 264 case KOMEDA_MODE_INACTIVE: 265 return INACTIVE_MODE; 266 default: 267 WARN(1, "Unknown operation mode"); 268 return INACTIVE_MODE; 269 } 270 } 271 272 static int d71_change_opmode(struct komeda_dev *mdev, int new_mode) 273 { 274 struct d71_dev *d71 = mdev->chip_data; 275 u32 opmode = to_d71_opmode(new_mode); 276 int ret; 277 278 malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode); 279 280 ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode), 281 100, 1000, 10000); 282 283 return ret; 284 } 285 286 static void d71_flush(struct komeda_dev *mdev, 287 int master_pipe, u32 active_pipes) 288 { 289 struct d71_dev *d71 = mdev->chip_data; 290 u32 reg_offset = (master_pipe == 0) ? 291 GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1; 292 293 malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL); 294 } 295 296 static int d71_reset(struct d71_dev *d71) 297 { 298 u32 __iomem *gcu = d71->gcu_addr; 299 int ret; 300 301 malidp_write32_mask(gcu, BLK_CONTROL, 302 GCU_CONTROL_SRST, GCU_CONTROL_SRST); 303 304 ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST), 305 100, 1000, 10000); 306 307 return ret; 308 } 309 310 void d71_read_block_header(u32 __iomem *reg, struct block_header *blk) 311 { 312 int i; 313 314 blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO); 315 if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED) 316 return; 317 318 blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO); 319 320 /* get valid input and output ids */ 321 for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) 322 blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0); 323 for (i = 0; i < 
PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++) 324 blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0); 325 } 326 327 static void d71_cleanup(struct komeda_dev *mdev) 328 { 329 struct d71_dev *d71 = mdev->chip_data; 330 331 if (!d71) 332 return; 333 334 devm_kfree(mdev->dev, d71); 335 mdev->chip_data = NULL; 336 } 337 338 static int d71_enum_resources(struct komeda_dev *mdev) 339 { 340 struct d71_dev *d71; 341 struct komeda_pipeline *pipe; 342 struct block_header blk; 343 u32 __iomem *blk_base; 344 u32 i, value, offset; 345 int err; 346 347 d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL); 348 if (!d71) 349 return -ENOMEM; 350 351 mdev->chip_data = d71; 352 d71->mdev = mdev; 353 d71->gcu_addr = mdev->reg_base; 354 d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2); 355 356 err = d71_reset(d71); 357 if (err) { 358 DRM_ERROR("Fail to reset d71 device.\n"); 359 goto err_cleanup; 360 } 361 362 /* probe GCU */ 363 value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO); 364 d71->num_blocks = value & 0xFF; 365 d71->num_pipelines = (value >> 8) & 0x7; 366 367 if (d71->num_pipelines > D71_MAX_PIPELINE) { 368 DRM_ERROR("d71 supports %d pipelines, but got: %d.\n", 369 D71_MAX_PIPELINE, d71->num_pipelines); 370 err = -EINVAL; 371 goto err_cleanup; 372 } 373 374 /* probe PERIPH */ 375 value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO); 376 if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) { 377 DRM_ERROR("access blk periph but got blk: %d.\n", 378 BLOCK_INFO_BLK_TYPE(value)); 379 err = -EINVAL; 380 goto err_cleanup; 381 } 382 383 value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID); 384 385 d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048; 386 d71->max_vsize = 4096; 387 d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1; 388 d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false; 389 d71->integrates_tbu = value & PERIPH_TBU_EN ? 
true : false; 390 391 for (i = 0; i < d71->num_pipelines; i++) { 392 pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline), 393 &d71_pipeline_funcs); 394 if (IS_ERR(pipe)) { 395 err = PTR_ERR(pipe); 396 goto err_cleanup; 397 } 398 399 /* D71 HW doesn't update shadow registers when display output 400 * is turning off, so when we disable all pipeline components 401 * together with display output disable by one flush or one 402 * operation, the disable operation updated registers will not 403 * be flush to or valid in HW, which may leads problem. 404 * To workaround this problem, introduce a two phase disable. 405 * Phase1: Disabling components with display is on to make sure 406 * the disable can be flushed to HW. 407 * Phase2: Only turn-off display output. 408 */ 409 value = KOMEDA_PIPELINE_IMPROCS | 410 BIT(KOMEDA_COMPONENT_TIMING_CTRLR); 411 412 pipe->standalone_disabled_comps = value; 413 414 d71->pipes[i] = to_d71_pipeline(pipe); 415 } 416 417 /* loop the register blks and probe */ 418 i = 2; /* exclude GCU and PERIPH */ 419 offset = D71_BLOCK_SIZE; /* skip GCU */ 420 while (i < d71->num_blocks) { 421 blk_base = mdev->reg_base + (offset >> 2); 422 423 d71_read_block_header(blk_base, &blk); 424 if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) { 425 err = d71_probe_block(d71, &blk, blk_base); 426 if (err) 427 goto err_cleanup; 428 i++; 429 } 430 431 offset += D71_BLOCK_SIZE; 432 } 433 434 DRM_DEBUG("total %d (out of %d) blocks are found.\n", 435 i, d71->num_blocks); 436 437 return 0; 438 439 err_cleanup: 440 d71_cleanup(mdev); 441 return err; 442 } 443 444 #define __HW_ID(__group, __format) \ 445 ((((__group) & 0x7) << 3) | ((__format) & 0x7)) 446 447 #define RICH KOMEDA_FMT_RICH_LAYER 448 #define SIMPLE KOMEDA_FMT_SIMPLE_LAYER 449 #define RICH_SIMPLE (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER) 450 #define RICH_WB (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER) 451 #define RICH_SIMPLE_WB (RICH_SIMPLE | KOMEDA_FMT_WB_LAYER) 452 453 
#define Rot_0		DRM_MODE_ROTATE_0
#define Flip_H_V	(DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
#define Rot_ALL_H_V	(DRM_MODE_ROTATE_MASK | Flip_H_V)

/* Supported AFBC superblock layouts */
#define LYT_NM		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
#define LYT_WB		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
#define LYT_NM_WB	(LYT_NM | LYT_WB)

/* Supported AFBC feature combinations */
#define AFB_TH		AFBC(_TILED | _SPARSE)
#define AFB_TH_SC_YTR	AFBC(_TILED | _SC | _SPARSE | _YTR)
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)

/* Per-format capability table: which layer types, rotations and AFBC
 * layouts/features each HW pixel format supports. Formats that appear twice
 * have a separate entry for their AFBC-compressed variant.
 */
static struct komeda_format_caps d71_format_caps_table[] = {
	/*   HW_ID    |        fourcc        |   layer_types |   rots    | afbc_layouts | afbc_features */
	/* ABGR_2101010*/
	{__HW_ID(0, 0),	DRM_FORMAT_ARGB2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(0, 2),	DRM_FORMAT_RGBA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 3),	DRM_FORMAT_BGRA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* ABGR_8888*/
	{__HW_ID(1, 0),	DRM_FORMAT_ARGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(1, 2),	DRM_FORMAT_RGBA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 3),	DRM_FORMAT_BGRA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* XBGB_8888 */
	{__HW_ID(2, 0),	DRM_FORMAT_XRGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 1),	DRM_FORMAT_XBGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 2),	DRM_FORMAT_RGBX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 3),	DRM_FORMAT_BGRX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* BGR_888 */ /* none-afbc RGB888 doesn't support rotation and flip */
	{__HW_ID(3, 0),	DRM_FORMAT_RGB888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	/* BGR 16bpp */
	{__HW_ID(4, 0),	DRM_FORMAT_RGBA5551,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 2),	DRM_FORMAT_RGB565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 4),	DRM_FORMAT_R8,		SIMPLE,		Rot_0,			0, 0},
	/* YUV 444/422/420 8bit  */
	{__HW_ID(5, 1),	DRM_FORMAT_YUYV,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH}, /* afbc */
	{__HW_ID(5, 2),	DRM_FORMAT_YUYV,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 3),	DRM_FORMAT_UYVY,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_NV12,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_YUV420_8BIT,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH}, /* afbc */
	{__HW_ID(5, 7),	DRM_FORMAT_YUV420,	RICH,		Flip_H_V,		0, 0},
	/* YUV 10bit*/
	{__HW_ID(6, 6),	DRM_FORMAT_X0L2,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_P010,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_YUV420_10BIT, RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH},
};

/* Extra per-modifier validation hook: reject the one combination the HW
 * cannot do - 90/270 degree rotation of the 32x8 (wide-block) AFBC layout.
 */
static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
				     u32 layer_type, u64 modifier, u32 rot)
{
	uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;

	if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
	    drm_rotation_90_or_270(rot)) {
		DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
		return false;
	}

	return true;
}

/* Install the D71 format capability table into the komeda device. */
static void d71_init_fmt_tbl(struct komeda_dev *mdev)
{
	struct komeda_format_caps_table *table = &mdev->fmt_tbl;

	table->format_caps = d71_format_caps_table;
	table->format_mod_supported = d71_format_mod_supported;
	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}

/* Switch the GCU into TBU-connect mode and wait for the translation units to
 * report connected, then enable TLB-prefetch on every pipeline's LPU.
 * Returns -1 when no TBU is integrated, negative errno on timeout.
 */
static int d71_connect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int i, ret;

	if (!d71->integrates_tbu)
		return -1;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);

	ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
			   100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out connecting to TCU!\n");
		/* fall back to inactive mode so HW is left in a sane state */
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
		return ret;
	}

	for (i = 0; i < d71->num_pipelines; i++)
		malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
				    LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
	return 0;
}

/* Switch the GCU into TBU-disconnect mode and wait until the translation
 * units report disconnected (their TCS status bits clear).
 */
static int d71_disconnect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int ret;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);

	ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
			   100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out disconnecting from TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
	}

	return ret;
}

/* The komeda core -> D71 backend dispatch table. */
static const struct komeda_dev_funcs d71_chip_funcs = {
	.init_format_table = d71_init_fmt_tbl,
	.enum_resources	= d71_enum_resources,
	.cleanup	= d71_cleanup,
	.irq_handler	= d71_irq_handler,
	.enable_irq	= d71_enable_irq,
	.disable_irq	= d71_disable_irq,
	.on_off_vblank	= d71_on_off_vblank,
	.change_opmode	= d71_change_opmode,
	.flush		= d71_flush,
	.connect_iommu	= d71_connect_iommu,
	.disconnect_iommu = d71_disconnect_iommu,
	.dump_register	= d71_dump,
};

/* Entry point used by the komeda core: read the chip identification
 * registers and hand back the D71 function table.
 */
const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
	chip->arch_id	= malidp_read32(reg_base, GLB_ARCH_ID);
	chip->core_id	= malidp_read32(reg_base, GLB_CORE_ID);
	chip->core_info	= malidp_read32(reg_base, GLB_CORE_INFO);
	chip->bus_width	= D71_BUS_WIDTH_16_BYTES;

	return &d71_chip_funcs;
}