1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4  * Author: James.Qian.Wang <james.qian.wang@arm.com>
5  *
6  */
7 
8 #include <drm/drm_blend.h>
9 #include <drm/drm_print.h>
10 #include "d71_dev.h"
11 #include "malidp_io.h"
12 
/*
 * get_lpu_event - collect and clear pending LPU (layer processing unit)
 * interrupts of one pipeline.
 *
 * Reads the raw IRQ status and translates the IBSY/EOW/OVR bits into
 * KOMEDA_EVENT_* flags. On any error/busy/overrun IRQ, the detailed
 * error bits of BLK_STATUS and LPU_TBU_STATUS are decoded into
 * KOMEDA_ERR_*/KOMEDA_EVENT_* flags and the handled status bits are
 * cleared via a masked write before the raw IRQ status itself is
 * cleared.
 *
 * Returns a u64 bitmask of KOMEDA_EVENT_* / KOMEDA_ERR_* events.
 */
static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->lpu_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & LPU_IRQ_IBSY)
		evts |= KOMEDA_EVENT_IBSY;
	if (raw_status & LPU_IRQ_EOW)
		evts |= KOMEDA_EVENT_EOW;
	if (raw_status & LPU_IRQ_OVR)
		evts |= KOMEDA_EVENT_OVR;

	if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
		u32 restore = 0, tbu_status;
		/* Check error of LPU status */
		status = malidp_read32(reg, BLK_STATUS);
		if (status & LPU_STATUS_AXIE) {
			restore |= LPU_STATUS_AXIE;
			evts |= KOMEDA_ERR_AXIE;
		}
		if (status & LPU_STATUS_ACE0) {
			restore |= LPU_STATUS_ACE0;
			evts |= KOMEDA_ERR_ACE0;
		}
		if (status & LPU_STATUS_ACE1) {
			restore |= LPU_STATUS_ACE1;
			evts |= KOMEDA_ERR_ACE1;
		}
		if (status & LPU_STATUS_ACE2) {
			restore |= LPU_STATUS_ACE2;
			evts |= KOMEDA_ERR_ACE2;
		}
		if (status & LPU_STATUS_ACE3) {
			restore |= LPU_STATUS_ACE3;
			evts |= KOMEDA_ERR_ACE3;
		}
		if (status & LPU_STATUS_FEMPTY) {
			restore |= LPU_STATUS_FEMPTY;
			evts |= KOMEDA_EVENT_EMPTY;
		}
		if (status & LPU_STATUS_FFULL) {
			restore |= LPU_STATUS_FFULL;
			evts |= KOMEDA_EVENT_FULL;
		}

		/* Clear only the status bits we have handled. */
		if (restore != 0)
			malidp_write32_mask(reg, BLK_STATUS, restore, 0);

		restore = 0;
		/* Check errors of TBU status */
		tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
		if (tbu_status & LPU_TBU_STATUS_TCF) {
			restore |= LPU_TBU_STATUS_TCF;
			evts |= KOMEDA_ERR_TCF;
		}
		if (tbu_status & LPU_TBU_STATUS_TTNG) {
			restore |= LPU_TBU_STATUS_TTNG;
			evts |= KOMEDA_ERR_TTNG;
		}
		if (tbu_status & LPU_TBU_STATUS_TITR) {
			restore |= LPU_TBU_STATUS_TITR;
			evts |= KOMEDA_ERR_TITR;
		}
		if (tbu_status & LPU_TBU_STATUS_TEMR) {
			restore |= LPU_TBU_STATUS_TEMR;
			evts |= KOMEDA_ERR_TEMR;
		}
		if (tbu_status & LPU_TBU_STATUS_TTF) {
			restore |= LPU_TBU_STATUS_TTF;
			evts |= KOMEDA_ERR_TTF;
		}
		if (restore != 0)
			malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
	}

	/* Acknowledge all raw IRQ bits seen at the top of the handler. */
	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
	return evts;
}
93 
/*
 * get_cu_event - collect and clear pending CU (compositor unit)
 * interrupts of one pipeline.
 *
 * Translates the overrun IRQ into KOMEDA_EVENT_OVR; on error/overrun
 * the BLK_STATUS error bits are decoded into KOMEDA_ERR_* flags and
 * the handled status bits are cleared.
 *
 * Returns a u64 bitmask of KOMEDA_EVENT_* / KOMEDA_ERR_* events.
 */
static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->cu_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & CU_IRQ_OVR)
		evts |= KOMEDA_EVENT_OVR;

	if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
		/* Bit 31 is masked off before decoding — presumably not an
		 * error-status bit; confirm against the CU register spec.
		 */
		status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
		if (status & CU_STATUS_CPE)
			evts |= KOMEDA_ERR_CPE;
		if (status & CU_STATUS_ZME)
			evts |= KOMEDA_ERR_ZME;
		if (status & CU_STATUS_CFGE)
			evts |= KOMEDA_ERR_CFGE;
		if (status)
			malidp_write32_mask(reg, BLK_STATUS, status, 0);
	}

	/* Acknowledge all raw IRQ bits seen at the top of the handler. */
	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);

	return evts;
}
120 
/*
 * get_dou_event - collect and clear pending DOU (display output unit)
 * interrupts of one pipeline.
 *
 * Translates the PL0 IRQ into KOMEDA_EVENT_VSYNC and the underrun IRQ
 * into KOMEDA_EVENT_URUN; on error/underrun the timeout and CSC error
 * bits of BLK_STATUS are decoded into KOMEDA_ERR_* flags and cleared.
 *
 * Returns a u64 bitmask of KOMEDA_EVENT_* / KOMEDA_ERR_* events.
 */
static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
{
	u32 __iomem *reg = d71_pipeline->dou_addr;
	u32 status, raw_status;
	u64 evts = 0ULL;

	raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
	if (raw_status & DOU_IRQ_PL0)
		evts |= KOMEDA_EVENT_VSYNC;
	if (raw_status & DOU_IRQ_UND)
		evts |= KOMEDA_EVENT_URUN;

	if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
		u32 restore  = 0;

		status = malidp_read32(reg, BLK_STATUS);
		if (status & DOU_STATUS_DRIFTTO) {
			restore |= DOU_STATUS_DRIFTTO;
			evts |= KOMEDA_ERR_DRIFTTO;
		}
		if (status & DOU_STATUS_FRAMETO) {
			restore |= DOU_STATUS_FRAMETO;
			evts |= KOMEDA_ERR_FRAMETO;
		}
		if (status & DOU_STATUS_TETO) {
			restore |= DOU_STATUS_TETO;
			evts |= KOMEDA_ERR_TETO;
		}
		if (status & DOU_STATUS_CSCE) {
			restore |= DOU_STATUS_CSCE;
			evts |= KOMEDA_ERR_CSCE;
		}

		/* Clear only the status bits we have handled. */
		if (restore != 0)
			malidp_write32_mask(reg, BLK_STATUS, restore, 0);
	}

	/* Acknowledge all raw IRQ bits seen at the top of the handler. */
	malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
	return evts;
}
161 
162 static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
163 {
164 	u32 evts = 0ULL;
165 
166 	if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
167 		evts |= get_lpu_event(d71_pipeline);
168 
169 	if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
170 		evts |= get_cu_event(d71_pipeline);
171 
172 	if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
173 		evts |= get_dou_event(d71_pipeline);
174 
175 	return evts;
176 }
177 
/*
 * d71_irq_handler - top-level D71 interrupt dispatch.
 *
 * Reads the global IRQ status, handles GCU-level interrupts (CVAL0/1
 * config-valid bits are reported as per-pipe KOMEDA_EVENT_FLIP, MERR
 * as a global KOMEDA_ERR_MERR), then fans out to the per-pipeline
 * event collectors. All collected events are accumulated into @evts
 * for the core to process.
 *
 * Returns IRQ_HANDLED if any global status bit was pending, IRQ_NONE
 * otherwise (via IRQ_RETVAL).
 */
static irqreturn_t
d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 status, gcu_status, raw_status;

	gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);

	if (gcu_status & GLB_IRQ_STATUS_GCU) {
		raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
		if (raw_status & GCU_IRQ_CVAL0)
			evts->pipes[0] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_CVAL1)
			evts->pipes[1] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_ERR) {
			status = malidp_read32(d71->gcu_addr, BLK_STATUS);
			if (status & GCU_STATUS_MERR) {
				evts->global |= KOMEDA_ERR_MERR;
				/* Clear the handled MERR status bit. */
				malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
						    GCU_STATUS_MERR, 0);
			}
		}

		/* Acknowledge the GCU raw IRQ bits seen above. */
		malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
	}

	if (gcu_status & GLB_IRQ_STATUS_PIPE0)
		evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);

	if (gcu_status & GLB_IRQ_STATUS_PIPE1)
		evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);

	return IRQ_RETVAL(gcu_status);
}
212 
/* Interrupt bits unmasked by d71_enable_irq() for each block type. */
#define ENABLED_GCU_IRQS	(GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
				 GCU_IRQ_MODE | GCU_IRQ_ERR)
#define ENABLED_LPU_IRQS	(LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
#define ENABLED_CU_IRQS		(CU_IRQ_OVR | CU_IRQ_ERR)
#define ENABLED_DOU_IRQS	(DOU_IRQ_UND | DOU_IRQ_ERR)
218 
219 static int d71_enable_irq(struct komeda_dev *mdev)
220 {
221 	struct d71_dev *d71 = mdev->chip_data;
222 	struct d71_pipeline *pipe;
223 	u32 i;
224 
225 	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
226 			    ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
227 	for (i = 0; i < d71->num_pipelines; i++) {
228 		pipe = d71->pipes[i];
229 		malidp_write32_mask(pipe->cu_addr,  BLK_IRQ_MASK,
230 				    ENABLED_CU_IRQS, ENABLED_CU_IRQS);
231 		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
232 				    ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
233 		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
234 				    ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
235 	}
236 	return 0;
237 }
238 
239 static int d71_disable_irq(struct komeda_dev *mdev)
240 {
241 	struct d71_dev *d71 = mdev->chip_data;
242 	struct d71_pipeline *pipe;
243 	u32 i;
244 
245 	malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
246 	for (i = 0; i < d71->num_pipelines; i++) {
247 		pipe = d71->pipes[i];
248 		malidp_write32_mask(pipe->cu_addr,  BLK_IRQ_MASK,
249 				    ENABLED_CU_IRQS, 0);
250 		malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
251 				    ENABLED_LPU_IRQS, 0);
252 		malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
253 				    ENABLED_DOU_IRQS, 0);
254 	}
255 	return 0;
256 }
257 
258 static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
259 {
260 	struct d71_dev *d71 = mdev->chip_data;
261 	struct d71_pipeline *pipe = d71->pipes[master_pipe];
262 
263 	malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
264 			    DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
265 }
266 
267 static int to_d71_opmode(int core_mode)
268 {
269 	switch (core_mode) {
270 	case KOMEDA_MODE_DISP0:
271 		return DO0_ACTIVE_MODE;
272 	case KOMEDA_MODE_DISP1:
273 		return DO1_ACTIVE_MODE;
274 	case KOMEDA_MODE_DUAL_DISP:
275 		return DO01_ACTIVE_MODE;
276 	case KOMEDA_MODE_INACTIVE:
277 		return INACTIVE_MODE;
278 	default:
279 		WARN(1, "Unknown operation mode");
280 		return INACTIVE_MODE;
281 	}
282 }
283 
284 static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
285 {
286 	struct d71_dev *d71 = mdev->chip_data;
287 	u32 opmode = to_d71_opmode(new_mode);
288 	int ret;
289 
290 	malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
291 
292 	ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
293 			   100, 1000, 10000);
294 
295 	return ret;
296 }
297 
298 static void d71_flush(struct komeda_dev *mdev,
299 		      int master_pipe, u32 active_pipes)
300 {
301 	struct d71_dev *d71 = mdev->chip_data;
302 	u32 reg_offset = (master_pipe == 0) ?
303 			 GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
304 
305 	malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
306 }
307 
308 static int d71_reset(struct d71_dev *d71)
309 {
310 	u32 __iomem *gcu = d71->gcu_addr;
311 	int ret;
312 
313 	malidp_write32_mask(gcu, BLK_CONTROL,
314 			    GCU_CONTROL_SRST, GCU_CONTROL_SRST);
315 
316 	ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
317 			   100, 1000, 10000);
318 
319 	return ret;
320 }
321 
322 void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
323 {
324 	int i;
325 
326 	blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
327 	if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
328 		return;
329 
330 	blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
331 
332 	/* get valid input and output ids */
333 	for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
334 		blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
335 	for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
336 		blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
337 }
338 
339 static void d71_cleanup(struct komeda_dev *mdev)
340 {
341 	struct d71_dev *d71 = mdev->chip_data;
342 
343 	if (!d71)
344 		return;
345 
346 	devm_kfree(mdev->dev, d71);
347 	mdev->chip_data = NULL;
348 }
349 
/*
 * d71_enum_resources - discover and register the D71 HW resources.
 *
 * Allocates the chip private data, resets the device, reads the global
 * configuration (number of blocks/pipelines, max line size, rich-layer
 * count, dual-link and TBU capabilities — from the PERIPH block on
 * legacy HW or from the GCU configuration registers on newer HW),
 * creates the pipeline objects and finally walks all register blocks,
 * probing every non-reserved one.
 *
 * Returns 0 on success; on failure the partially initialized chip data
 * is released via d71_cleanup() and a negative errno is returned.
 */
static int d71_enum_resources(struct komeda_dev *mdev)
{
	struct d71_dev *d71;
	struct komeda_pipeline *pipe;
	struct block_header blk;
	u32 __iomem *blk_base;
	u32 i, value, offset;
	int err;

	d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
	if (!d71)
		return -ENOMEM;

	mdev->chip_data = d71;
	d71->mdev = mdev;
	d71->gcu_addr = mdev->reg_base;
	/* Register offsets are byte-based; reg_base is a u32 pointer. */
	d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);

	err = d71_reset(d71);
	if (err) {
		DRM_ERROR("Fail to reset d71 device.\n");
		goto err_cleanup;
	}

	/* probe GCU */
	value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
	d71->num_blocks = value & 0xFF;
	d71->num_pipelines = (value >> 8) & 0x7;

	if (d71->num_pipelines > D71_MAX_PIPELINE) {
		DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
			  D71_MAX_PIPELINE, d71->num_pipelines);
		err = -EINVAL;
		goto err_cleanup;
	}

	/* Only the legacy HW has the periph block, the newer merges the periph
	 * into GCU
	 */
	value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
		d71->periph_addr = NULL;

	if (d71->periph_addr) {
		/* probe PERIPHERAL in legacy HW */
		value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);

		d71->max_line_size	= value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
		d71->max_vsize		= 4096;
		d71->num_rich_layers	= value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
		d71->supports_dual_link	= !!(value & PERIPH_SPLIT_EN);
		d71->integrates_tbu	= !!(value & PERIPH_TBU_EN);
	} else {
		/* Newer HW: read the capabilities from the GCU instead. */
		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
		d71->max_line_size	= GCU_MAX_LINE_SIZE(value);
		d71->max_vsize		= GCU_MAX_NUM_LINES(value);

		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
		d71->num_rich_layers	= GCU_NUM_RICH_LAYERS(value);
		d71->supports_dual_link	= GCU_DISPLAY_SPLIT_EN(value);
		d71->integrates_tbu	= GCU_DISPLAY_TBU_EN(value);
	}

	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
					   &d71_pipeline_funcs);
		if (IS_ERR(pipe)) {
			err = PTR_ERR(pipe);
			goto err_cleanup;
		}

		/* D71 HW doesn't update shadow registers when display output
		 * is turning off, so when we disable all pipeline components
		 * together with display output disable by one flush or one
		 * operation, the disable operation updated registers will not
		 * be flush to or valid in HW, which may leads problem.
		 * To workaround this problem, introduce a two phase disable.
		 * Phase1: Disabling components with display is on to make sure
		 *	   the disable can be flushed to HW.
		 * Phase2: Only turn-off display output.
		 */
		value = KOMEDA_PIPELINE_IMPROCS |
			BIT(KOMEDA_COMPONENT_TIMING_CTRLR);

		pipe->standalone_disabled_comps = value;

		d71->pipes[i] = to_d71_pipeline(pipe);
	}

	/* loop the register blks and probe.
	 * NOTE: d71->num_blocks includes reserved blocks.
	 * d71->num_blocks = GCU + valid blocks + reserved blocks
	 */
	i = 1; /* exclude GCU */
	offset = D71_BLOCK_SIZE; /* skip GCU */
	while (i < d71->num_blocks) {
		blk_base = mdev->reg_base + (offset >> 2);

		d71_read_block_header(blk_base, &blk);
		if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
			err = d71_probe_block(d71, &blk, blk_base);
			if (err)
				goto err_cleanup;
		}

		i++;
		offset += D71_BLOCK_SIZE;
	}

	DRM_DEBUG("total %d (out of %d) blocks are found.\n",
		  i, d71->num_blocks);

	return 0;

err_cleanup:
	d71_cleanup(mdev);
	return err;
}
468 
/* Shorthand helpers for building the format capability table below. */
#define __HW_ID(__group, __format) \
	((((__group) & 0x7) << 3) | ((__format) & 0x7))

#define RICH		KOMEDA_FMT_RICH_LAYER
#define SIMPLE		KOMEDA_FMT_SIMPLE_LAYER
#define RICH_SIMPLE	(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
#define RICH_WB		(KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
#define RICH_SIMPLE_WB	(RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)

#define Rot_0		DRM_MODE_ROTATE_0
#define Flip_H_V	(DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
#define Rot_ALL_H_V	(DRM_MODE_ROTATE_MASK | Flip_H_V)

#define LYT_NM		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
#define LYT_WB		BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
#define LYT_NM_WB	(LYT_NM | LYT_WB)

#define AFB_TH		AFBC(_TILED | _SPARSE)
#define AFB_TH_SC_YTR	AFBC(_TILED | _SC | _SPARSE | _YTR)
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)

/*
 * Format capability table for D71. A fourcc may appear twice: once for
 * the linear/non-AFBC caps and once (marked "afbc") for the AFBC caps
 * of the same HW format id.
 */
static struct komeda_format_caps d71_format_caps_table[] = {
	/*   HW_ID    |        fourcc         |   layer_types |   rots    | afbc_layouts | afbc_features */
	/* ABGR_2101010*/
	{__HW_ID(0, 0),	DRM_FORMAT_ARGB2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(0, 2),	DRM_FORMAT_RGBA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 3),	DRM_FORMAT_BGRA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* ABGR_8888*/
	{__HW_ID(1, 0),	DRM_FORMAT_ARGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(1, 2),	DRM_FORMAT_RGBA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 3),	DRM_FORMAT_BGRA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* XBGB_8888 */
	{__HW_ID(2, 0),	DRM_FORMAT_XRGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 1),	DRM_FORMAT_XBGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 2),	DRM_FORMAT_RGBX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 3),	DRM_FORMAT_BGRX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* BGR_888 */ /* none-afbc RGB888 doesn't support rotation and flip */
	{__HW_ID(3, 0),	DRM_FORMAT_RGB888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	/* BGR 16bpp */
	{__HW_ID(4, 0),	DRM_FORMAT_RGBA5551,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 2),	DRM_FORMAT_RGB565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 4), DRM_FORMAT_R8,		SIMPLE,		Rot_0,			0, 0},
	/* YUV 444/422/420 8bit  */
	{__HW_ID(5, 1),	DRM_FORMAT_YUYV,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH}, /* afbc */
	{__HW_ID(5, 2),	DRM_FORMAT_YUYV,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 3),	DRM_FORMAT_UYVY,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_NV12,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_YUV420_8BIT,	RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH}, /* afbc */
	{__HW_ID(5, 7),	DRM_FORMAT_YUV420,	RICH,		Flip_H_V,		0, 0},
	/* YUV 10bit*/
	{__HW_ID(6, 6),	DRM_FORMAT_X0L2,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_P010,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_YUV420_10BIT, RICH,		Rot_ALL_H_V,	LYT_NM, AFB_TH},
};
533 
534 static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
535 				     u32 layer_type, u64 modifier, u32 rot)
536 {
537 	uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
538 
539 	if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
540 	    drm_rotation_90_or_270(rot)) {
541 		DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
542 		return false;
543 	}
544 
545 	return true;
546 }
547 
548 static void d71_init_fmt_tbl(struct komeda_dev *mdev)
549 {
550 	struct komeda_format_caps_table *table = &mdev->fmt_tbl;
551 
552 	table->format_caps = d71_format_caps_table;
553 	table->format_mod_supported = d71_format_mod_supported;
554 	table->n_formats = ARRAY_SIZE(d71_format_caps_table);
555 }
556 
/*
 * d71_connect_iommu - switch the GCU into TBU connect mode.
 *
 * Only meaningful when the HW integrates a TBU; returns -1 otherwise.
 * Requests TBU_CONNECT_MODE, waits for the per-pipeline TCS status
 * bits to be reported, then sets LPU_TBU_CTRL_TLBPEN on every
 * pipeline's LPU (presumably TLB prefetch enable — confirm with the
 * HW spec). On timeout the GCU is returned to INACTIVE_MODE and the
 * negative wait result is propagated.
 */
static int d71_connect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int i, ret;

	if (!d71->integrates_tbu)
		return -1;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);

	ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
			100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out connecting to TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
		return ret;
	}

	for (i = 0; i < d71->num_pipelines; i++)
		malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
				    LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
	return 0;
}
583 
/*
 * d71_disconnect_iommu - switch the GCU into TBU disconnect mode.
 *
 * Requests TBU_DISCONNECT_MODE and waits for the per-pipeline TCS
 * status bits to clear. On timeout the GCU is forced to INACTIVE_MODE;
 * the (possibly negative) wait result is returned either way.
 */
static int d71_disconnect_iommu(struct komeda_dev *mdev)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 __iomem *reg = d71->gcu_addr;
	u32 check_bits = (d71->num_pipelines == 2) ?
			 GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
	int ret;

	malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);

	ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
			100, 1000, 1000);
	if (ret < 0) {
		DRM_ERROR("timed out disconnecting from TCU!\n");
		malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
	}

	return ret;
}
603 
/* D71/D32 implementation of the komeda core device hooks. */
static const struct komeda_dev_funcs d71_chip_funcs = {
	.init_format_table	= d71_init_fmt_tbl,
	.enum_resources		= d71_enum_resources,
	.cleanup		= d71_cleanup,
	.irq_handler		= d71_irq_handler,
	.enable_irq		= d71_enable_irq,
	.disable_irq		= d71_disable_irq,
	.on_off_vblank		= d71_on_off_vblank,
	.change_opmode		= d71_change_opmode,
	.flush			= d71_flush,
	.connect_iommu		= d71_connect_iommu,
	.disconnect_iommu	= d71_disconnect_iommu,
	.dump_register		= d71_dump,
};
618 
619 const struct komeda_dev_funcs *
620 d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
621 {
622 	const struct komeda_dev_funcs *funcs;
623 	u32 product_id;
624 
625 	chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
626 
627 	product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
628 
629 	switch (product_id) {
630 	case MALIDP_D71_PRODUCT_ID:
631 	case MALIDP_D32_PRODUCT_ID:
632 		funcs = &d71_chip_funcs;
633 		break;
634 	default:
635 		DRM_ERROR("Unsupported product: 0x%x\n", product_id);
636 		return NULL;
637 	}
638 
639 	chip->arch_id	= malidp_read32(reg_base, GLB_ARCH_ID);
640 	chip->core_info	= malidp_read32(reg_base, GLB_CORE_INFO);
641 	chip->bus_width	= D71_BUS_WIDTH_16_BYTES;
642 
643 	return funcs;
644 }
645